unijoh committed on
Commit
51e6dce
1 Parent(s): cb292d6

Update tts.py

Browse files
Files changed (1) hide show
  1. tts.py +10 -11
tts.py CHANGED
@@ -1,22 +1,21 @@
1
  import torch
2
- from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
3
  import logging
4
  import numpy as np
5
  import soundfile as sf
6
- from huggingface_hub import hf_hub_download
7
 
8
  # Set up logging
9
  logging.basicConfig(level=logging.DEBUG)
10
 
11
  MODEL_ID = "facebook/mms-tts-fao"
12
 
13
- # Try to load the model and processor
14
  try:
15
- processor = Wav2Vec2Processor.from_pretrained(MODEL_ID)
16
- model = Wav2Vec2ForCTC.from_pretrained(MODEL_ID)
17
- logging.info("Model and processor loaded successfully.")
18
  except Exception as e:
19
- logging.error(f"Error loading model or processor: {e}")
20
  raise
21
 
22
  def synthesize_speech(text):
@@ -26,18 +25,18 @@ def synthesize_speech(text):
26
  logging.error("Text input is empty.")
27
  return None
28
 
29
- inputs = processor(text, return_tensors="pt")
30
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
31
  model.to(device)
32
  inputs = inputs.to(device)
33
 
34
  with torch.no_grad():
35
- speech = model.generate(**inputs)
36
 
37
  logging.info("Speech generated successfully.")
38
 
39
- # Decode the generated speech and save to an audio file
40
- waveform = speech.cpu().numpy().flatten()
41
  # Normalize waveform to the range [-1, 1]
42
  waveform = np.clip(waveform, -1.0, 1.0)
43
 
 
1
# Module setup: logging configuration plus one-time load of the Faroese
# MMS-TTS (VITS) tokenizer and model used by synthesize_speech().

import logging

import numpy as np
import soundfile as sf
import torch
from transformers import AutoTokenizer, AutoModelForTextToWaveform

# Verbose root-logger output for the whole module.
logging.basicConfig(level=logging.DEBUG)

# Hugging Face checkpoint id: Meta MMS text-to-speech, Faroese.
MODEL_ID = "facebook/mms-tts-fao"

# Load eagerly at import time; a failure here is fatal, so surface it.
try:
    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
    model = AutoModelForTextToWaveform.from_pretrained(MODEL_ID)
    logging.info("Model and tokenizer loaded successfully.")
except Exception as e:
    logging.error(f"Error loading model or tokenizer: {e}")
    raise
20
 
21
  def synthesize_speech(text):
 
25
  logging.error("Text input is empty.")
26
  return None
27
 
28
+ inputs = tokenizer(text, return_tensors="pt")
29
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
30
  model.to(device)
31
  inputs = inputs.to(device)
32
 
33
  with torch.no_grad():
34
+ outputs = model.generate(**inputs)
35
 
36
  logging.info("Speech generated successfully.")
37
 
38
+ # Convert outputs to waveform
39
+ waveform = outputs.cpu().numpy().flatten()
40
  # Normalize waveform to the range [-1, 1]
41
  waveform = np.clip(waveform, -1.0, 1.0)
42