---------------------------------------------------------------------------
ImportError                               Traceback (most recent call last)
Cell In[7], line 15
     13 # generate speech
     14 with torch.no_grad():
---> 15     output = model(**inputs).waveform

File ~/miniconda3/envs/tts2/lib/python3.8/site-packages/torch/nn/modules/module.py:1102, in Module._call_impl(self, *input, **kwargs)
   1098 # If we don't have any hooks, we want to skip the rest of the logic in
   1099 # this function, and just call forward.
   1100 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1101         or _global_forward_hooks or _global_forward_pre_hooks):
-> 1102     return forward_call(*input, **kwargs)
   1103 # Do not call functions when jit is used
   1104 full_backward_hooks, non_full_backward_hooks = [], []

File ~/miniconda3/envs/tts2/lib/python3.8/site-packages/transformers/models/vits/modeling_vits.py:1423, in VitsModel.forward(self, input_ids, attention_mask, speaker_id, output_attentions, output_hidden_states, return_dict, labels)
   1420 else:
   1421     speaker_embeddings = None
-> 1423 text_encoder_output = self.text_encoder(
   1424     input_ids=input_ids,
   1425     padding_mask=input_padding_mask,
   1426     attention_mask=attention_mask,
   1427     output_attentions=output_attentions,
   1428     output_hidden_states=output_hidden_states,
   1429     return_dict=return_dict,
   1430 )
   1431 hidden_states = text_encoder_output[0] if not return_dict else text_encoder_output.last_hidden_state
   1432 hidden_states = hidden_states.transpose(1, 2)

File ~/miniconda3/envs/tts2/lib/python3.8/site-packages/torch/nn/modules/module.py:1102, in Module._call_impl(self, *input, **kwargs)
   1098 # If we don't have any hooks, we want to skip the rest of the logic in
   1099 # this function, and just call forward.
   1100 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1101         or _global_forward_hooks or _global_forward_pre_hooks):
-> 1102     return forward_call(*input, **kwargs)
   1103 # Do not call functions when jit is used
   1104 full_backward_hooks, non_full_backward_hooks = [], []

File ~/miniconda3/envs/tts2/lib/python3.8/site-packages/transformers/models/vits/modeling_vits.py:1222, in VitsTextEncoder.forward(self, input_ids, padding_mask, attention_mask, output_attentions, output_hidden_states, return_dict)
   1211 def forward(
   1212     self,
   1213     input_ids: torch.Tensor,
   (...)
   1218     return_dict: Optional[bool] = True,
   1219 ) -> Union[Tuple[torch.Tensor], VitsTextEncoderOutput]:
   1220 hidden_states = self.embed_tokens(input_ids) * math.sqrt(self.config.hidden_size)
-> 1222 encoder_outputs = self.encoder(
   1223     hidden_states=hidden_states,
   1224     padding_mask=padding_mask,
   1225     attention_mask=attention_mask,
   1226     output_attentions=output_attentions,
   1227     output_hidden_states=output_hidden_states,
   1228     return_dict=return_dict,
   1229 )
   1231 last_hidden_state = encoder_outputs[0] if not return_dict else encoder_outputs.last_hidden_state
   1233 stats = self.project(last_hidden_state.transpose(1, 2)).transpose(1, 2) * padding_mask

File ~/miniconda3/envs/tts2/lib/python3.8/site-packages/torch/nn/modules/module.py:1102, in Module._call_impl(self, *input, **kwargs)
   1098 # If we don't have any hooks, we want to skip the rest of the logic in
   1099 # this function, and just call forward.
   1100 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1101         or _global_forward_hooks or _global_forward_pre_hooks):
-> 1102     return forward_call(*input, **kwargs)
   1103 # Do not call functions when jit is used
   1104 full_backward_hooks, non_full_backward_hooks = [], []

File ~/miniconda3/envs/tts2/lib/python3.8/site-packages/transformers/models/vits/modeling_vits.py:1143, in VitsEncoder.forward(self, hidden_states, padding_mask, attention_mask, output_attentions, output_hidden_states, return_dict)
   1139 attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)
   1141 hidden_states = hidden_states * padding_mask
-> 1143 synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self)
   1145 for encoder_layer in self.layers:
   1146     if output_hidden_states:

File ~/miniconda3/envs/tts2/lib/python3.8/site-packages/transformers/integrations/fsdp.py:29, in is_fsdp_managed_module(module)
     26 if not is_torch_available():
     27     return False
---> 29 import torch.distributed.fsdp
     31 return isinstance(module, torch.distributed.fsdp.FullyShardedDataParallel) or getattr(
     32     module, "_is_fsdp_managed_module", False
     33 )

File ~/miniconda3/envs/tts2/lib/python3.8/site-packages/torch/distributed/fsdp/__init__.py:1
----> 1 from ._flat_param import FlatParameter as FlatParameter
      2 from .fully_sharded_data_parallel import (
      3     BackwardPrefetch,
      4     CPUOffload,
   (...)
     18     StateDictType,
     19 )
     21 __all__ = [
     22     "BackwardPrefetch",
     23     "CPUOffload",
   (...)
     37     "StateDictType",
     38 ]

File ~/miniconda3/envs/tts2/lib/python3.8/site-packages/torch/distributed/fsdp/_flat_param.py:31
     29 import torch.nn.functional as F
     30 from torch import Tensor
---> 31 from torch.distributed.fsdp._common_utils import (
     32     _FSDPDeviceHandle,
     33     _named_parameters_with_duplicates,
     34     _no_dispatch_record_stream,
     35     _set_fsdp_flattened,
     36     HandleTrainingState,
     37 )
     38 from torch.distributed.utils import (
     39     _alloc_storage,
     40     _data_ptr_allocated,
     41     _free_storage,
     42     _p_assert,
     43 )
     44 from torch.nn.parameter import _ParameterMeta  # type: ignore[attr-defined]

File ~/miniconda3/envs/tts2/lib/python3.8/site-packages/torch/distributed/fsdp/_common_utils.py:32
     30 import torch.nn as nn
     31 from torch.distributed._composable_state import _get_module_state, _State
---> 32 from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
     33     _CHECKPOINT_PREFIX,
     34 )
     35 from torch.distributed.utils import _apply_to_tensors
     36 from torch.utils._mode_utils import no_dispatch

File ~/miniconda3/envs/tts2/lib/python3.8/site-packages/torch/distributed/algorithms/_checkpoint/checkpoint_wrapper.py:10
      8 import torch.nn as nn
      9 from torch.autograd.graph import save_on_cpu
---> 10 from torch.distributed.utils import _pack_kwargs, _replace_by_prefix, _unpack_kwargs
     11 from torch.utils.checkpoint import checkpoint as torch_utils_checkpoint
     13 _CHECKPOINT_WRAPPED_MODULE = "_checkpoint_wrapped_module"

File ~/miniconda3/envs/tts2/lib/python3.8/site-packages/torch/distributed/utils.py:21
     19 from torch import nn
     20 from torch.nn.parallel._functions import _get_stream
---> 21 from torch.nn.parallel.scatter_gather import _is_namedtuple
     22 from torch.nn.utils.rnn import PackedSequence
     24 __all__ = []  # type: ignore[var-annotated]

ImportError: cannot import name '_is_namedtuple' from 'torch.nn.parallel.scatter_gather' (/root/miniconda3/envs/tts2/lib/python3.8/site-packages/torch/nn/parallel/scatter_gather.py)
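For context, the failing cell follows the standard transformers VITS text-to-speech pattern (tokenize text, then call the model under torch.no_grad() and read .waveform). Below is a minimal, self-contained sketch of that usage; the checkpoint name "facebook/mms-tts-eng" and the input sentence are assumptions for illustration and are not taken from the traceback.

    # Minimal sketch of the VITS TTS call path seen in the frames above.
    # Checkpoint name and input text are assumptions, not from the traceback.
    import torch
    from transformers import AutoTokenizer, VitsModel

    model = VitsModel.from_pretrained("facebook/mms-tts-eng")        # assumed checkpoint
    tokenizer = AutoTokenizer.from_pretrained("facebook/mms-tts-eng")

    inputs = tokenizer("Some example text to synthesize.", return_tensors="pt")

    # generate speech
    with torch.no_grad():
        output = model(**inputs).waveform   # tensor of shape (batch, num_samples)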