jbetker committed
Commit 713281e
1 Parent(s): c52cc78

update api constants

Files changed (3)
  1. api.py +7 -3
  2. eval_multiple.py +2 -4
  3. models/cvvp.py +0 -0
api.py CHANGED
@@ -186,7 +186,9 @@ class TextToSpeech:
             'high_quality': Use if you want the absolute best. This is not really worth the compute, though.
         """
         # Use generally found best tuning knobs for generation.
-        kwargs.update({'temperature': .8, 'length_penalty': 1.0, 'repetition_penalty': 2.0, 'top_p': .8,
+        kwargs.update({'temperature': .8, 'length_penalty': 1.0, 'repetition_penalty': 2.0,
+                       #'typical_sampling': True,
+                       'top_p': .8,
                        'cond_free_k': 2.0, 'diffusion_temperature': 1.0})
         # Presets are defined here.
         presets = {
@@ -202,7 +204,8 @@ class TextToSpeech:
             # autoregressive generation parameters follow
             num_autoregressive_samples=512, temperature=.8, length_penalty=1, repetition_penalty=2.0, top_p=.8,
             # diffusion generation parameters follow
-            diffusion_iterations=100, cond_free=True, cond_free_k=2, diffusion_temperature=1.0,):
+            diffusion_iterations=100, cond_free=True, cond_free_k=2, diffusion_temperature=1.0,
+            **hf_generate_kwargs):
         text = torch.IntTensor(self.tokenizer.encode(text)).unsqueeze(0).cuda()
         text = F.pad(text, (0, 1)) # This may not be necessary.

@@ -228,7 +231,8 @@
                 temperature=temperature,
                 num_return_sequences=self.autoregressive_batch_size,
                 length_penalty=length_penalty,
-                repetition_penalty=repetition_penalty)
+                repetition_penalty=repetition_penalty,
+                **hf_generate_kwargs)
             padding_needed = self.autoregressive.max_mel_tokens - codes.shape[1]
             codes = F.pad(codes, (0, padding_needed), value=stop_mel_token)
             samples.append(codes)
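The practical effect of the api.py change: tts() now accepts **hf_generate_kwargs and forwards them to the autoregressive model's Hugging Face generate() call, so callers can experiment with extra sampling options without editing api.py. A minimal usage sketch, not from this commit: the import paths mirror eval_multiple.py, the clip path is hypothetical, and typical_p is a standard Hugging Face generate() argument rather than anything defined here.

# Illustrative sketch only; paths and parameter choices are assumptions.
import torchaudio
from api import TextToSpeech
from utils.audio import load_audio  # helper used by eval_multiple.py (module path assumed)

tts = TextToSpeech()
cond = load_audio('voices/example.wav', 22050)  # hypothetical 22.05 kHz conditioning clip

# Keyword arguments that tts() does not consume itself now flow through
# **hf_generate_kwargs into the autoregressive generate() call.
sample = tts.tts('Hello there.', [cond, cond],
                 num_autoregressive_samples=256,
                 typical_p=.9)  # forwarded HF generate() kwarg

down = torchaudio.functional.resample(sample, 24000, 22050)
torchaudio.save('out.wav', down.squeeze(0).cpu(), 22050)  # shape handling may need adjusting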
eval_multiple.py CHANGED
@@ -16,7 +16,7 @@ if __name__ == '__main__':
         lines = [l.strip().split('\t') for l in f.readlines()]

     tts = TextToSpeech()
-    for k in range(4):
+    for k in range(3):
         outpath = f'{outpath_base}_{k}'
         os.makedirs(outpath, exist_ok=True)
         recorder = open(os.path.join(outpath, 'transcript.tsv'), 'w', encoding='utf-8')
@@ -27,9 +27,7 @@
             path = os.path.join(os.path.dirname(fname), line[1])
             cond_audio = load_audio(path, 22050)
             torchaudio.save(os.path.join(outpath_real, os.path.basename(line[1])), cond_audio, 22050)
-            sample = tts.tts(transcript, [cond_audio, cond_audio], num_autoregressive_samples=128, k=1,
-                             repetition_penalty=2.0, length_penalty=2, temperature=.5, top_p=.5,
-                             diffusion_temperature=.7, cond_free_k=2, diffusion_iterations=70)
+            sample = tts.tts_with_preset(transcript, [cond_audio, cond_audio], preset='standard')

             down = torchaudio.functional.resample(sample, 24000, 22050)
             fout_path = os.path.join(outpath, os.path.basename(line[1]))
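eval_multiple.py now goes through the preset machinery instead of hand-tuned per-call knobs. Read together with the api.py hunk at line 186, tts_with_preset plausibly merges the shared tuning defaults and the chosen preset into kwargs before delegating to tts(); the sketch below is a reconstruction under that assumption. Only the kwargs.update() defaults, the presets dict, and the 'standard'/'high_quality' preset names are visible in this diff; the signature, merge order, and preset contents are guessed.

# Reconstruction sketch, not the code from this commit.
def tts_with_preset(self, text, voice_samples, preset='standard', **kwargs):
    # Use generally found best tuning knobs for generation (from the hunk above).
    kwargs.update({'temperature': .8, 'length_penalty': 1.0, 'repetition_penalty': 2.0,
                   'top_p': .8, 'cond_free_k': 2.0, 'diffusion_temperature': 1.0})
    # Presets are defined here; their contents are not shown in this diff.
    presets = {
        'standard': {},      # placeholder
        'high_quality': {},  # placeholder
    }
    kwargs.update(presets[preset])
    # Anything extra, including hf_generate_kwargs, rides along into tts().
    return self.tts(text, voice_samples, **kwargs)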
models/cvvp.py ADDED
File without changes