Text-to-Speech
ASR_config: Utils/ASR/config.yml  # config for the pre-trained text aligner (ASR) model
ASR_path: Utils/ASR/epoch_00080.pth  # pre-trained text aligner checkpoint
F0_path: Utils/JDC/bst.t7  # pre-trained F0 (pitch) extractor checkpoint
PLBERT_dir: Utils/PLBERT/  # directory of the pre-trained PL-BERT model
batch_size: 2
data_params:
  OOD_data: Data/OOD_texts.txt  # out-of-distribution texts for SLM adversarial training
  logger: wandb
  min_length: 50  # minimum text length for OOD samples
  root_path: Data
  train_data: Data/Train_list.txt
  val_data: Data/Val_list.txt
device: cuda
epochs: 50
load_only_params: true  # load model weights only, not optimizer state or epoch counter
log_dir: Models/LJSpeech
log_interval: 10
loss_params:
  diff_epoch: 10  # epoch at which style diffusion training starts
  joint_epoch: 30  # epoch at which joint (SLM adversarial) training starts
  lambda_F0: 1  # F0 reconstruction loss weight
  lambda_ce: 20  # duration cross-entropy loss weight
  lambda_diff: 1  # score-matching (diffusion) loss weight
  lambda_dur: 1  # duration loss weight
  lambda_gen: 1  # generator adversarial loss weight
  lambda_mel: 5  # mel reconstruction loss weight
  lambda_mono: 1  # monotonic alignment loss weight
  lambda_norm: 1  # norm (energy) reconstruction loss weight
  lambda_s2s: 1  # sequence-to-sequence alignment loss weight
  lambda_slm: 1  # SLM feature-matching loss weight
  lambda_sty: 1  # style reconstruction loss weight
max_len: 200  # maximum number of frames per sample during training
model_params:
  decoder:
    resblock_dilation_sizes:
      - - 1
        - 3
        - 5
      - - 1
        - 3
        - 5
      - - 1
        - 3
        - 5
    resblock_kernel_sizes:
      - 3
      - 7
      - 11
    type: hifigan  # HiFi-GAN-based waveform decoder
    upsample_initial_channel: 512
    upsample_kernel_sizes:
      - 20
      - 10
      - 6
      - 4
    upsample_rates:
      - 10
      - 5
      - 3
      - 2
  diffusion:  # style diffusion model settings
    dist:
      estimate_sigma_data: true
      mean: -3
      sigma_data: 0.18  # placeholder; estimated from data when estimate_sigma_data is true
      std: 1
    embedding_mask_proba: 0.1
    transformer:
      head_features: 64
      multiplier: 2
      num_heads: 8
      num_layers: 3
  dim_in: 64
  dropout: 0.2
  hidden_dim: 512
  max_conv_dim: 512
  max_dur: 50
  multispeaker: true
  n_layer: 3
  n_mels: 80  # number of mel bands
  n_token: 178  # number of phoneme tokens
  slm:  # speech language model (WavLM) discriminator
    hidden: 768
    initial_channel: 64
    model: microsoft/wavlm-base-plus
    nlayers: 13
    sr: 16000  # sample rate expected by the SLM
  style_dim: 128
optimizer_params:
  bert_lr: 0.00001  # learning rate for PL-BERT
  ft_lr: 0.0001  # learning rate for acoustic modules
  lr: 0.0001  # general learning rate
preprocess_params:
  spect_params:  # mel spectrogram settings
    hop_length: 300
    n_fft: 2048
    win_length: 1200
  sr: 24000  # audio sample rate
pretrained_model: Models/LibriTTS/epochs_2nd_00020.pth  # second-stage checkpoint used as the starting point for fine-tuning
save_freq: 1  # save a checkpoint every this many epochs
second_stage_load_pretrained: true  # treat the pretrained checkpoint as a second-stage model
slmadv_params:  # SLM adversarial training
  batch_percentage: 0.5  # use only this fraction of the batch to limit memory use
  iter: 10  # update the SLM discriminator every this many generator steps
  max_len: 500  # maximum sample length for SLM adversarial training
  min_len: 400  # minimum sample length for SLM adversarial training
  scale: 0.01  # gradient scaling factor for the SLM adversarial loss
  sig: 1.5  # sigma for differentiable duration modeling
  thresh: 5  # gradient norm threshold above which gradients are scaled
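
For context, a minimal sketch of how a YAML config like this is typically consumed from Python with PyYAML; the filename and the printed fields are illustrative assumptions, not part of the config itself:

# Minimal sketch (assumption): load the training config with PyYAML and read a few fields.
# "config_ft.yml" is a placeholder filename.
import yaml

with open("config_ft.yml", "r") as f:
    config = yaml.safe_load(f)

print(config["log_dir"])                          # Models/LJSpeech
print(config["loss_params"]["lambda_mel"])        # 5
print(config["model_params"]["decoder"]["type"])  # hifigan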