jbetker committed
Commit c52cc78
1 Parent(s): b4c568a
Files changed (5):
  1. .gitignore +1 -0
  2. api.py +21 -22
  3. do_tts.py +6 -6
  4. read.py +40 -32
  5. utils/audio.py +13 -0
.gitignore CHANGED
@@ -20,6 +20,7 @@ parts/
 sdist/
 var/
 wheels/
+results/*
 pip-wheel-metadata/
 share/python-wheels/
 *.egg-info/
api.py CHANGED
@@ -150,7 +150,7 @@ def do_spectrogram_diffusion(diffusion_model, diffuser, mel_codes, conditioning_
 
 
 class TextToSpeech:
-    def __init__(self, autoregressive_batch_size=32):
+    def __init__(self, autoregressive_batch_size=16):
         self.autoregressive_batch_size = autoregressive_batch_size
         self.tokenizer = VoiceBpeTokenizer()
         download_models()
@@ -160,14 +160,7 @@ class TextToSpeech:
                                               heads=16, number_text_tokens=256, start_text_token=255, checkpointing=False,
                                               train_solo_embeddings=False,
                                               average_conditioning_embeddings=True).cpu().eval()
-        self.autoregressive.load_state_dict(torch.load('.models/autoregressive_audiobooks.pth'))
-
-        self.autoregressive_for_latents = UnifiedVoice(max_mel_tokens=604, max_text_tokens=402, max_conditioning_inputs=2, layers=30,
-                                              model_dim=1024,
-                                              heads=16, number_text_tokens=256, start_text_token=255, checkpointing=False,
-                                              train_solo_embeddings=False,
-                                              average_conditioning_embeddings=True).cpu().eval()
-        self.autoregressive_for_latents.load_state_dict(torch.load('.models/autoregressive_audiobooks.pth'))
+        self.autoregressive.load_state_dict(torch.load('.models/autoregressive.pth'))
 
         self.clip = VoiceCLIP(dim_text=512, dim_speech=512, dim_latent=512, num_text_tokens=256, text_enc_depth=12,
                               text_seq_len=350, text_heads=8,
@@ -178,32 +171,38 @@ class TextToSpeech:
         self.diffusion = DiffusionTts(model_channels=1024, num_layers=10, in_channels=100, out_channels=200,
                                       in_latent_channels=1024, in_tokens=8193, dropout=0, use_fp16=False, num_heads=16,
                                       layer_drop=0, unconditioned_percentage=0).cpu().eval()
-        self.diffusion.load_state_dict(torch.load('.models/diffusion_decoder_audiobooks.pth'))
+        self.diffusion.load_state_dict(torch.load('.models/diffusion_decoder.pth'))
 
         self.vocoder = UnivNetGenerator().cpu()
         self.vocoder.load_state_dict(torch.load('.models/vocoder.pth')['model_g'])
         self.vocoder.eval(inference=True)
 
-    def tts_with_preset(self, text, voice_samples, preset='intelligible', **kwargs):
+    def tts_with_preset(self, text, voice_samples, preset='fast', **kwargs):
         """
         Calls TTS with one of a set of preset generation parameters. Options:
-            'intelligible': Maximizes the probability of understandable words at the cost of diverse voices, intonation and prosody.
-            'realistic': Increases the diversity of spoken voices and improves realism of vocal characteristics at the cost of intelligibility.
-            'mid': Somewhere between 'intelligible' and 'realistic'.
+            'ultra_fast': Produces speech at a speed which belies the name of this repo. (Not really, but it's definitely fastest).
+            'fast': Decent quality speech at a decent inference rate. A good choice for mass inference.
+            'standard': Very good quality. This is generally about as good as you are going to get.
+            'high_quality': Use if you want the absolute best. This is not really worth the compute, though.
         """
+        # Use generally found best tuning knobs for generation.
+        kwargs.update({'temperature': .8, 'length_penalty': 1.0, 'repetition_penalty': 2.0, 'top_p': .8,
+                       'cond_free_k': 2.0, 'diffusion_temperature': 1.0})
+        # Presets are defined here.
         presets = {
-            'intelligible': {'temperature': .5, 'length_penalty': 2.0, 'repetition_penalty': 2.0, 'top_p': .5, 'diffusion_iterations': 100, 'cond_free': True, 'cond_free_k': .7, 'diffusion_temperature': .7},
-            'mid': {'temperature': .7, 'length_penalty': 1.0, 'repetition_penalty': 2.0, 'top_p': .7, 'diffusion_iterations': 100, 'cond_free': True, 'cond_free_k': 1.5, 'diffusion_temperature': .8},
-            'realistic': {'temperature': 1.0, 'length_penalty': 1.0, 'repetition_penalty': 2.0, 'top_p': .9, 'diffusion_iterations': 100, 'cond_free': True, 'cond_free_k': 2, 'diffusion_temperature': 1},
+            'ultra_fast': {'num_autoregressive_samples': 32, 'diffusion_iterations': 16, 'cond_free': False},
+            'fast': {'num_autoregressive_samples': 96, 'diffusion_iterations': 32},
+            'standard': {'num_autoregressive_samples': 256, 'diffusion_iterations': 128},
+            'high_quality': {'num_autoregressive_samples': 512, 'diffusion_iterations': 2048},
         }
         kwargs.update(presets[preset])
         return self.tts(text, voice_samples, **kwargs)
 
     def tts(self, text, voice_samples, k=1,
             # autoregressive generation parameters follow
-            num_autoregressive_samples=512, temperature=.5, length_penalty=1, repetition_penalty=2.0, top_p=.5,
+            num_autoregressive_samples=512, temperature=.8, length_penalty=1, repetition_penalty=2.0, top_p=.8,
             # diffusion generation parameters follow
-            diffusion_iterations=100, cond_free=True, cond_free_k=2, diffusion_temperature=.7,):
+            diffusion_iterations=100, cond_free=True, cond_free_k=2, diffusion_temperature=1.0,):
         text = torch.IntTensor(self.tokenizer.encode(text)).unsqueeze(0).cuda()
         text = F.pad(text, (0, 1))  # This may not be necessary.
 
@@ -250,11 +249,11 @@ class TextToSpeech:
         # The diffusion model actually wants the last hidden layer from the autoregressive model as conditioning
         # inputs. Re-produce those for the top results. This could be made more efficient by storing all of these
         # results, but will increase memory usage.
-        self.autoregressive_for_latents = self.autoregressive_for_latents.cuda()
-        best_latents = self.autoregressive_for_latents(conds, text, torch.tensor([text.shape[-1]], device=conds.device), best_results,
+        self.autoregressive = self.autoregressive.cuda()
+        best_latents = self.autoregressive(conds, text, torch.tensor([text.shape[-1]], device=conds.device), best_results,
                                            torch.tensor([best_results.shape[-1]*self.autoregressive.mel_length_compression], device=conds.device),
                                            return_latent=True, clip_inputs=False)
-        self.autoregressive_for_latents = self.autoregressive_for_latents.cpu()
+        self.autoregressive = self.autoregressive.cpu()
 
         print("Performing vocoding..")
         wav_candidates = []
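
The two kwargs.update calls in the new tts_with_preset give the fixed tuning knobs and the chosen preset precedence over anything the caller passes in. A minimal standalone sketch of that merging order (merge_preset_kwargs is a hypothetical helper, not part of the commit):

def merge_preset_kwargs(preset, **kwargs):
    # Fixed "best tuning knobs" overwrite caller-supplied values first...
    kwargs.update({'temperature': .8, 'length_penalty': 1.0, 'repetition_penalty': 2.0, 'top_p': .8,
                   'cond_free_k': 2.0, 'diffusion_temperature': 1.0})
    # ...then the chosen preset overwrites the sample/iteration counts.
    presets = {
        'ultra_fast': {'num_autoregressive_samples': 32, 'diffusion_iterations': 16, 'cond_free': False},
        'fast': {'num_autoregressive_samples': 96, 'diffusion_iterations': 32},
        'standard': {'num_autoregressive_samples': 256, 'diffusion_iterations': 128},
        'high_quality': {'num_autoregressive_samples': 512, 'diffusion_iterations': 2048},
    }
    kwargs.update(presets[preset])
    return kwargs

print(merge_preset_kwargs('fast', temperature=.3)['temperature'])  # -> 0.8: the fixed knobs win

Only keys outside both dicts (e.g. k) survive from the caller through to tts.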
do_tts.py CHANGED
@@ -27,12 +27,12 @@ if __name__ == '__main__':
     }
 
     parser = argparse.ArgumentParser()
-    parser.add_argument('-text', type=str, help='Text to speak.', default="I am a language model that has learned to speak.")
-    parser.add_argument('-voice', type=str, help='Use a preset conditioning voice (defined above). Overrides cond_path.', default='obama,dotrice,harris,lescault,otto,atkins,grace,kennard,mol')
-    parser.add_argument('-num_samples', type=int, help='How many total outputs the autoregressive transformer should produce.', default=128)
-    parser.add_argument('-batch_size', type=int, help='How many samples to process at once in the autoregressive model.', default=16)
-    parser.add_argument('-num_diffusion_samples', type=int, help='Number of outputs that progress to the diffusion stage.', default=16)
-    parser.add_argument('-output_path', type=str, help='Where to store outputs.', default='results/')
+    parser.add_argument('--text', type=str, help='Text to speak.', default="I am a language model that has learned to speak.")
+    parser.add_argument('--voice', type=str, help='Use a preset conditioning voice (defined above). Overrides cond_path.', default='obama,dotrice,harris,lescault,otto,atkins,grace,kennard,mol')
+    parser.add_argument('--num_samples', type=int, help='How many total outputs the autoregressive transformer should produce.', default=128)
+    parser.add_argument('--batch_size', type=int, help='How many samples to process at once in the autoregressive model.', default=16)
+    parser.add_argument('--num_diffusion_samples', type=int, help='Number of outputs that progress to the diffusion stage.', default=16)
+    parser.add_argument('--output_path', type=str, help='Where to store outputs.', default='results/')
     args = parser.parse_args()
     os.makedirs(args.output_path, exist_ok=True)
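
The single-dash to double-dash rename changes the CLI itself: argparse registers '-text' and '--text' as different option strings, so invocations must now use the GNU-style double-dash form. A self-contained check (not from the commit):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--text', type=str, default="I am a language model that has learned to speak.")
args = parser.parse_args(['--text', 'Hello world'])  # passing '-text' instead would now be rejected
print(args.text)  # -> Hello world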
 
read.py CHANGED
@@ -6,7 +6,7 @@ import torch.nn.functional as F
 import torchaudio
 
 from api import TextToSpeech, load_conditioning
-from utils.audio import load_audio
+from utils.audio import load_audio, get_voices
 from utils.tokenizer import VoiceBpeTokenizer
 
 def split_and_recombine_text(texts, desired_length=200, max_len=300):
@@ -27,41 +27,49 @@ def split_and_recombine_text(texts, desired_length=200, max_len=300):
     return texts
 
 if __name__ == '__main__':
-    # These are voices drawn randomly from the training set. You are free to substitute your own voices in, but testing
-    # has shown that the model does not generalize to new voices very well.
-    preselected_cond_voices = {
-        'emma_stone': ['voices/emma_stone/1.wav','voices/emma_stone/2.wav','voices/emma_stone/3.wav'],
-        'tom_hanks': ['voices/tom_hanks/1.wav','voices/tom_hanks/2.wav','voices/tom_hanks/3.wav'],
-        'patrick_stewart': ['voices/patrick_stewart/1.wav','voices/patrick_stewart/2.wav','voices/patrick_stewart/3.wav','voices/patrick_stewart/4.wav'],
-    }
-
     parser = argparse.ArgumentParser()
-    parser.add_argument('-textfile', type=str, help='A file containing the text to read.', default="data/riding_hood.txt")
-    parser.add_argument('-voice', type=str, help='Use a preset conditioning voice (defined above). Overrides cond_path.', default='patrick_stewart')
-    parser.add_argument('-num_samples', type=int, help='How many total outputs the autoregressive transformer should produce.', default=128)
-    parser.add_argument('-batch_size', type=int, help='How many samples to process at once in the autoregressive model.', default=16)
-    parser.add_argument('-output_path', type=str, help='Where to store outputs.', default='results/longform/')
-    parser.add_argument('-generation_preset', type=str, help='Preset to use for generation', default='realistic')
+    parser.add_argument('--textfile', type=str, help='A file containing the text to read.', default="data/riding_hood.txt")
+    parser.add_argument('--voice', type=str, help='Selects the voice to use for generation. See options in voices/ directory (and add your own!) '
+                                                  'Use the & character to join two voices together. Use a comma to perform inference on multiple voices.', default='patrick_stewart')
+    parser.add_argument('--output_path', type=str, help='Where to store outputs.', default='results/longform/')
+    parser.add_argument('--generation_preset', type=str, help='Preset to use for generation', default='standard')
     args = parser.parse_args()
-    os.makedirs(args.output_path, exist_ok=True)
 
-    with open(args.textfile, 'r', encoding='utf-8') as f:
-        text = ''.join([l for l in f.readlines()])
-    texts = split_and_recombine_text(text)
+    outpath = args.output_path
+    voices = get_voices()
+    selected_voices = args.voice.split(',')
+    for selected_voice in selected_voices:
+        voice_outpath = os.path.join(outpath, selected_voice)
+        os.makedirs(voice_outpath, exist_ok=True)
 
-    tts = TextToSpeech(autoregressive_batch_size=args.batch_size)
+        with open(args.textfile, 'r', encoding='utf-8') as f:
+            text = ''.join([l for l in f.readlines()])
+        texts = split_and_recombine_text(text)
+        tts = TextToSpeech()
 
-    priors = []
-    for j, text in enumerate(texts):
-        cond_paths = preselected_cond_voices[args.voice]
-        conds = priors.copy()
-        for cond_path in cond_paths:
-            c = load_audio(cond_path, 22050)
-            conds.append(c)
-        gen = tts.tts_with_preset(text, conds, preset=args.generation_preset, num_autoregressive_samples=args.num_samples)
-        torchaudio.save(os.path.join(args.output_path, f'{j}.wav'), gen.squeeze(0).cpu(), 24000)
+        if '&' in selected_voice:
+            voice_sel = selected_voice.split('&')
+        else:
+            voice_sel = [selected_voice]
+        cond_paths = []
+        for vsel in voice_sel:
+            if vsel not in voices.keys():
+                print(f'Error: voice {vsel} not available. Skipping.')
+                continue
+            cond_paths.extend(voices[vsel])
+        if not cond_paths:
+            print('Error: no valid voices specified. Try again.')
 
-        priors.append(torchaudio.functional.resample(gen, 24000, 22050).squeeze(0))
-        while len(priors) > 2:
-            priors.pop(0)
+        priors = []
+        for j, text in enumerate(texts):
+            conds = priors.copy()
+            for cond_path in cond_paths:
+                c = load_audio(cond_path, 22050)
+                conds.append(c)
+            gen = tts.tts_with_preset(text, conds, preset=args.generation_preset)
+            torchaudio.save(os.path.join(voice_outpath, f'{j}.wav'), gen.squeeze(0).cpu(), 24000)
 
+            priors.append(torchaudio.functional.resample(gen, 24000, 22050).squeeze(0))
+            while len(priors) > 2:
+                priors.pop(0)
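
The new --voice argument encodes a small grammar: commas separate independent generation runs, while '&' pools the conditioning clips of several voices into a single run. A hypothetical standalone sketch of that parsing (the helper name is illustrative, not part of the commit):

def parse_voice_arg(voice_arg):
    runs = []
    for selected_voice in voice_arg.split(','):      # comma: independent runs, one output dir each
        if '&' in selected_voice:
            runs.append(selected_voice.split('&'))   # '&': pool clips from several voices into one run
        else:
            runs.append([selected_voice])
    return runs

print(parse_voice_arg('patrick_stewart,emma_stone&tom_hanks'))
# -> [['patrick_stewart'], ['emma_stone', 'tom_hanks']]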
 
utils/audio.py CHANGED
@@ -1,3 +1,6 @@
+import os
+from glob import glob
+
 import torch
 import torchaudio
 import numpy as np
@@ -78,6 +81,16 @@ def dynamic_range_decompression(x, C=1):
     return torch.exp(x) / C
 
 
+def get_voices():
+    subs = os.listdir('voices')
+    voices = {}
+    for sub in subs:
+        subj = os.path.join('voices', sub)
+        if os.path.isdir(subj):
+            voices[sub] = glob(f'{subj}/*.wav')
+    return voices
+
+
 class TacotronSTFT(torch.nn.Module):
     def __init__(self, filter_length=1024, hop_length=256, win_length=1024,
                  n_mel_channels=80, sampling_rate=22050, mel_fmin=0.0,
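
Assuming a voices/ directory laid out as voices/<name>/*.wav relative to the working directory (the layout the scripts expect), get_voices() can be used like this (a usage sketch, not from the commit):

from utils.audio import get_voices

voices = get_voices()  # e.g. {'patrick_stewart': ['voices/patrick_stewart/1.wav', ...], ...}
for name, clips in voices.items():
    print(f'{name}: {len(clips)} conditioning clips')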