Model RuntimeError

#8
by OlegXio - opened

┌───────────────────── Traceback (most recent call last) ─────────────────────┐
│ D:\PycharmProjects\XioAI\aiapi.py:2 in │
│ │
│ 1 from TTS.api import TTS │
│ > 2 tts = TTS("tts_models/multilingual/multi-dataset/xtts_v1", gpu=True) │
│ 3 │
│ 4 def copyvoice(name): │
│ 5 │ if name =='glados': │
│ │
│ C:\Users\1\AppData\Roaming\Python\Python310\site-packages\TTS\api.py:81 in │
init
│ │
│ 78 │ │ │
│ 79 │ │ if model_name is not None: │
│ 80 │ │ │ if "tts_models" in model_name or "coqui_studio" in model_ │
│ > 81 │ │ │ │ self.load_tts_model_by_name(model_name, gpu) │
│ 82 │ │ │ elif "voice_conversion_models" in model_name: │
│ 83 │ │ │ │ self.load_vc_model_by_name(model_name, gpu) │
│ 84 │
│ │
│ C:\Users\1\AppData\Roaming\Python\Python310\site-packages\TTS\api.py:185 in │
│ load_tts_model_by_name │
│ │
│ 182 │ │ │ │
│ 183 │ │ │ # init synthesizer │
│ 184 │ │ │ # None values are fetch from the model │
│ > 185 │ │ │ self.synthesizer = Synthesizer( │
│ 186 │ │ │ │ tts_checkpoint=model_path, │
│ 187 │ │ │ │ tts_config_path=config_path, │
│ 188 │ │ │ │ tts_speakers_file=None, │
│ │
│ C:\Users\1\AppData\Roaming\Python\Python310\site-packages\TTS\utils\synthes │
│ izer.py:109 in init
│ │
│ 106 │ │ │ │ self._load_fairseq_from_dir(model_dir, use_cuda) │
│ 107 │ │ │ │ self.output_sample_rate = self.tts_config.audio["samp │
│ 108 │ │ │ else: │
│ > 109 │ │ │ │ self._load_tts_from_dir(model_dir, use_cuda) │
│ 110 │ │ │ │ self.output_sample_rate = self.tts_config.audio["outp │
│ 111 │ │
│ 112 │ @staticmethod │
│ │
│ C:\Users\1\AppData\Roaming\Python\Python310\site-packages\TTS\utils\synthes │
│ izer.py:164 in load_tts_from_dir │
│ │
│ 161 │ │ config = load_config(os.path.join(model_dir, "config.json")) │
│ 162 │ │ self.tts_config = config │
│ 163 │ │ self.tts_model = setup_tts_model(config) │
│ > 164 │ │ self.tts_model.load_checkpoint(config, checkpoint_dir=model_d │
│ 165 │ │ if use_cuda: │
│ 166 │ │ │ self.tts_model.cuda() │
│ 167 │
│ │
│ C:\Users\1\AppData\Roaming\Python\Python310\site-packages\TTS\tts\models\xt │
│ ts.py:645 in load_checkpoint │
│ │
│ 642 │ │ self.init_models() │
│ 643 │ │ if eval: │
│ 644 │ │ │ self.gpt.init_gpt_for_inference(kv_cache=self.args.kv_cac │
│ > 645 │ │ self.load_state_dict(load_fsspec(model_path)["model"], strict │
│ 646 │ │ │
│ 647 │ │ if eval: │
│ 648 │ │ │ self.gpt.init_gpt_for_inference(kv_cache=self.args.kv_cac │
│ │
│ D:\Program │
│ Files\Python310\lib\site-packages\torch\nn\modules\module.py:2041 in │
│ load_state_dict │
│ │
│ 2038 │ │ │ │ │ │ ', '.join('"{}"'.format(k) for k in missing

│ 2039 │ │ │
│ 2040 │ │ if len(error_msgs) > 0: │
│ > 2041 │ │ │ raise RuntimeError('Error(s) in loading state_dict for { │
│ 2042 │ │ │ │ │ │ │ self.class.name, "\n\t".join( │
│ 2043 │ │ return _IncompatibleKeys(missing_keys, unexpected_keys) │
│ 2044 │
└─────────────────────────────────────────────────────────────────────────────┘
RuntimeError: Error(s) in loading state_dict for Xtts:
Missing key(s) in state_dict: "gpt.gpt.h.0.attn.bias",
"gpt.gpt.h.0.attn.masked_bias", "gpt.gpt.h.1.attn.bias",
"gpt.gpt.h.1.attn.masked_bias", "gpt.gpt.h.2.attn.bias",
"gpt.gpt.h.2.attn.masked_bias", "gpt.gpt.h.3.attn.bias",
"gpt.gpt.h.3.attn.masked_bias", "gpt.gpt.h.4.attn.bias",
"gpt.gpt.h.4.attn.masked_bias", "gpt.gpt.h.5.attn.bias",
"gpt.gpt.h.5.attn.masked_bias", "gpt.gpt.h.6.attn.bias",
"gpt.gpt.h.6.attn.masked_bias", "gpt.gpt.h.7.attn.bias",
"gpt.gpt.h.7.attn.masked_bias", "gpt.gpt.h.8.attn.bias",
"gpt.gpt.h.8.attn.masked_bias", "gpt.gpt.h.9.attn.bias",
"gpt.gpt.h.9.attn.masked_bias", "gpt.gpt.h.10.attn.bias",
"gpt.gpt.h.10.attn.masked_bias", "gpt.gpt.h.11.attn.bias",
"gpt.gpt.h.11.attn.masked_bias", "gpt.gpt.h.12.attn.bias",
"gpt.gpt.h.12.attn.masked_bias", "gpt.gpt.h.13.attn.bias",
"gpt.gpt.h.13.attn.masked_bias", "gpt.gpt.h.14.attn.bias",
"gpt.gpt.h.14.attn.masked_bias", "gpt.gpt.h.15.attn.bias",
"gpt.gpt.h.15.attn.masked_bias", "gpt.gpt.h.16.attn.bias",
"gpt.gpt.h.16.attn.masked_bias", "gpt.gpt.h.17.attn.bias",
"gpt.gpt.h.17.attn.masked_bias", "gpt.gpt.h.18.attn.bias",
"gpt.gpt.h.18.attn.masked_bias", "gpt.gpt.h.19.attn.bias",
"gpt.gpt.h.19.attn.masked_bias", "gpt.gpt.h.20.attn.bias",
"gpt.gpt.h.20.attn.masked_bias", "gpt.gpt.h.21.attn.bias",
"gpt.gpt.h.21.attn.masked_bias", "gpt.gpt.h.22.attn.bias",
"gpt.gpt.h.22.attn.masked_bias", "gpt.gpt.h.23.attn.bias",
"gpt.gpt.h.23.attn.masked_bias", "gpt.gpt.h.24.attn.bias",
"gpt.gpt.h.24.attn.masked_bias", "gpt.gpt.h.25.attn.bias",
"gpt.gpt.h.25.attn.masked_bias", "gpt.gpt.h.26.attn.bias",
"gpt.gpt.h.26.attn.masked_bias", "gpt.gpt.h.27.attn.bias",
"gpt.gpt.h.27.attn.masked_bias", "gpt.gpt.h.28.attn.bias",
"gpt.gpt.h.28.attn.masked_bias", "gpt.gpt.h.29.attn.bias",
"gpt.gpt.h.29.attn.masked_bias", "gpt.gpt_inference.transformer.h.0.attn.bias",
"gpt.gpt_inference.transformer.h.0.attn.masked_bias",
"gpt.gpt_inference.transformer.h.1.attn.bias",
"gpt.gpt_inference.transformer.h.1.attn.masked_bias",
"gpt.gpt_inference.transformer.h.2.attn.bias",
"gpt.gpt_inference.transformer.h.2.attn.masked_bias",
"gpt.gpt_inference.transformer.h.3.attn.bias",
"gpt.gpt_inference.transformer.h.3.attn.masked_bias",
"gpt.gpt_inference.transformer.h.4.attn.bias",
"gpt.gpt_inference.transformer.h.4.attn.masked_bias",
"gpt.gpt_inference.transformer.h.5.attn.bias",
"gpt.gpt_inference.transformer.h.5.attn.masked_bias",
"gpt.gpt_inference.transformer.h.6.attn.bias",
"gpt.gpt_inference.transformer.h.6.attn.masked_bias",
"gpt.gpt_inference.transformer.h.7.attn.bias",
"gpt.gpt_inference.transformer.h.7.attn.masked_bias",
"gpt.gpt_inference.transformer.h.8.attn.bias",
"gpt.gpt_inference.transformer.h.8.attn.masked_bias",
"gpt.gpt_inference.transformer.h.9.attn.bias",
"gpt.gpt_inference.transformer.h.9.attn.masked_bias",
"gpt.gpt_inference.transformer.h.10.attn.bias",
"gpt.gpt_inference.transformer.h.10.attn.masked_bias",
"gpt.gpt_inference.transformer.h.11.attn.bias",
"gpt.gpt_inference.transformer.h.11.attn.masked_bias",
"gpt.gpt_inference.transformer.h.12.attn.bias",
"gpt.gpt_inference.transformer.h.12.attn.masked_bias",
"gpt.gpt_inference.transformer.h.13.attn.bias",
"gpt.gpt_inference.transformer.h.13.attn.masked_bias",
"gpt.gpt_inference.transformer.h.14.attn.bias",
"gpt.gpt_inference.transformer.h.14.attn.masked_bias",
"gpt.gpt_inference.transformer.h.15.attn.bias",
"gpt.gpt_inference.transformer.h.15.attn.masked_bias",
"gpt.gpt_inference.transformer.h.16.attn.bias",
"gpt.gpt_inference.transformer.h.16.attn.masked_bias",
"gpt.gpt_inference.transformer.h.17.attn.bias",
"gpt.gpt_inference.transformer.h.17.attn.masked_bias",
"gpt.gpt_inference.transformer.h.18.attn.bias",
"gpt.gpt_inference.transformer.h.18.attn.masked_bias",
"gpt.gpt_inference.transformer.h.19.attn.bias",
"gpt.gpt_inference.transformer.h.19.attn.masked_bias",
"gpt.gpt_inference.transformer.h.20.attn.bias",
"gpt.gpt_inference.transformer.h.20.attn.masked_bias",
"gpt.gpt_inference.transformer.h.21.attn.bias",
"gpt.gpt_inference.transformer.h.21.attn.masked_bias",
"gpt.gpt_inference.transformer.h.22.attn.bias",
"gpt.gpt_inference.transformer.h.22.attn.masked_bias",
"gpt.gpt_inference.transformer.h.23.attn.bias",
"gpt.gpt_inference.transformer.h.23.attn.masked_bias",
"gpt.gpt_inference.transformer.h.24.attn.bias",
"gpt.gpt_inference.transformer.h.24.attn.masked_bias",
"gpt.gpt_inference.transformer.h.25.attn.bias",
"gpt.gpt_inference.transformer.h.25.attn.masked_bias",
"gpt.gpt_inference.transformer.h.26.attn.bias",
"gpt.gpt_inference.transformer.h.26.attn.masked_bias",
"gpt.gpt_inference.transformer.h.27.attn.bias",
"gpt.gpt_inference.transformer.h.27.attn.masked_bias",
"gpt.gpt_inference.transformer.h.28.attn.bias",
"gpt.gpt_inference.transformer.h.28.attn.masked_bias",
"gpt.gpt_inference.transformer.h.29.attn.bias",
"gpt.gpt_inference.transformer.h.29.attn.masked_bias".

@gorkemgoknar
I received
ImportError: accelerate>=0.20.3 is required for a normal functioning of this module, but found accelerate==0.18.0.
Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main
I ran both of these commands, but the error remains the same.

@OlegXio maybe you did not download the model (the folder may still be created even if the download failed — this can happen if you did not agree to the TOS on the first download).
I suggest checking the model folder.

The snippet below prints the model path; check inside that folder, and if there are no files in it, delete the xtts_v1 folder.

import os
from TTS.utils.generic_utils import get_user_data_dir

# Locate the local cache directory where Coqui TTS stores downloaded models,
# then show where the XTTS v1 checkpoint is expected to live.
cache_root = get_user_data_dir("tts")
model_path = os.path.join(cache_root, "tts_models--multilingual--multi-dataset--xtts_v1")
print(model_path)

Then, after deleting the xtts_v1 folder, you can re-download the model from code like this:

import os 
# Agree to Coqui's terms of service non-interactively, so the model download
# does not block waiting for a confirmation prompt.
# NOTE: this must be set before the model is instantiated below.
os.environ["COQUI_TOS_AGREED"] = "1"
from TTS.api import TTS
# Instantiating TTS with a model name triggers the download of the model
# files into the user data directory if they are not already present.
tts = TTS("tts_models/multilingual/multi-dataset/xtts_v1") 
Coqui.ai org

Please update your install: TTS==0.17.8 should fix these issues, and it also includes a speed-boosted vocoder.

gorkemgoknar changed discussion status to closed

Sign up or log in to comment