ashishkblink committed on
Commit
db5ada1
·
verified ·
1 Parent(s): 2b0abe3

Upload f5_tts/configs/F5TTS_Base_train.yaml with huggingface_hub

Browse files
f5_tts/configs/F5TTS_Base_train.yaml ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ hydra:
2
+ run:
3
+ dir: ckpts/${model.name}_${model.mel_spec.mel_spec_type}_${model.tokenizer}_${datasets.name}/${now:%Y-%m-%d}/${now:%H-%M-%S}
4
+
5
+ datasets:
6
+ name: Emilia_ZH_EN # dataset name
7
+ batch_size_per_gpu: 38400 # 8 GPUs, 8 * 38400 = 307200
8
+ batch_size_type: frame # "frame" or "sample"
9
+ max_samples: 64 # max sequences per batch if use frame-wise batch_size. we set 32 for small models, 64 for base models
10
+ num_workers: 16
11
+
12
+ optim:
13
+ epochs: 15
14
+ learning_rate: 7.5e-5
15
+ num_warmup_updates: 20000 # warmup steps
16
+ grad_accumulation_steps: 1 # note: updates = steps / grad_accumulation_steps
17
+ max_grad_norm: 1.0 # gradient clipping
18
+ bnb_optimizer: False # use bnb 8bit AdamW optimizer or not
19
+
20
+ model:
21
+ name: F5TTS_Base # model name
22
+ tokenizer: pinyin # tokenizer type
23
+ tokenizer_path: None # if tokenizer = 'custom', define the path to the tokenizer you want to use (should be vocab.txt)
24
+ arch:
25
+ dim: 1024
26
+ depth: 22
27
+ heads: 16
28
+ ff_mult: 2
29
+ text_dim: 512
30
+ conv_layers: 4
31
+ mel_spec:
32
+ target_sample_rate: 24000
33
+ n_mel_channels: 100
34
+ hop_length: 256
35
+ win_length: 1024
36
+ n_fft: 1024
37
+ mel_spec_type: vocos # 'vocos' or 'bigvgan'
38
+ vocoder:
39
+ is_local: False # use local offline ckpt or not
40
+ local_path: None # local vocoder path
41
+
42
+ ckpts:
43
+ logger: wandb # wandb | tensorboard | None
44
+ save_per_updates: 50000 # save checkpoint per steps
45
+ last_per_steps: 5000 # save last checkpoint per steps
46
+ save_dir: ckpts/${model.name}_${model.mel_spec.mel_spec_type}_${model.tokenizer}_${datasets.name}