Update config_dit_mel_seed_facodec_small_wavenet.yml
config_dit_mel_seed_facodec_small_wavenet.yml
CHANGED
@@ -1,97 +1,97 @@
 log_dir: "./runs/run_dit_mel_seed_facodec_small"
 save_freq: 1
 log_interval: 10
 save_interval: 1000
 device: "cuda"
 epochs: 1000 # number of epochs for first stage training (pre-training)
 batch_size: 2
 batch_length: 100 # maximum duration of audio in a batch (in seconds)
 max_len: 80 # maximum number of frames
 pretrained_model: ""
 pretrained_encoder: ""
 load_only_params: False # set to true if do not want to load epoch numbers and optimizer parameters
 
 F0_path: "modules/JDC/bst.t7"
 
 data_params:
   train_data: "./data/train.txt"
   val_data: "./data/val.txt"
   root_path: "./data/"
 
 preprocess_params:
   sr: 22050
   spect_params:
     n_fft: 1024
     win_length: 1024
     hop_length: 256
     n_mels: 80
 
 model_params:
   dit_type: "DiT" # uDiT or DiT
   reg_loss_type: "l1" # l1 or l2
 
   speech_tokenizer:
     type: 'facodec'
-    path: "
+    path: "speech_tokenizer_v1.onnx"
 
   style_encoder:
     dim: 192
-    campplus_path: "
+    campplus_path: "campplus_cn_common.bin"
 
   DAC:
     encoder_dim: 64
     encoder_rates: [2, 5, 5, 6]
     decoder_dim: 1536
     decoder_rates: [6, 5, 5, 2]
     sr: 24000
 
   length_regulator:
     channels: 512
     is_discrete: true
     content_codebook_size: 1024
     in_frame_rate: 80
     out_frame_rate: 80
     sampling_ratios: [1, 1, 1, 1]
     token_dropout_prob: 0.3 # probability of performing token dropout
     token_dropout_range: 1.0 # maximum percentage of tokens to drop out
     n_codebooks: 3
     quantizer_dropout: 0.5
     f0_condition: false
     n_f0_bins: 512
 
   DiT:
     hidden_dim: 512
     num_heads: 8
     depth: 13
     class_dropout_prob: 0.1
     block_size: 8192
     in_channels: 80
     style_condition: true
     final_layer_type: 'wavenet'
     target: 'mel' # mel or codec
     content_dim: 512
     content_codebook_size: 1024
     content_type: 'discrete'
     f0_condition: true
     n_f0_bins: 512
     content_codebooks: 1
     is_causal: false
     long_skip_connection: true
     zero_prompt_speech_token: false # for prompt component, do not input corresponding speech token
     time_as_token: false
     style_as_token: false
     uvit_skip_connection: true
     add_resblock_in_transformer: false
 
   wavenet:
     hidden_dim: 512
     num_layers: 8
     kernel_size: 5
     dilation_rate: 1
     p_dropout: 0.2
     style_condition: true
 
 loss_params:
   base_lr: 0.0001
   lambda_mel: 45
   lambda_kl: 1.0
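For reference, a minimal sketch of how a training script might consume this file with PyYAML and derive the frame rates it implies. The config path and access pattern are assumptions for illustration, not the repository's actual loader:

import yaml

# Assumed location of the file shown in the diff above.
with open("configs/config_dit_mel_seed_facodec_small_wavenet.yml") as f:
    cfg = yaml.safe_load(f)

pp = cfg["preprocess_params"]
sp = pp["spect_params"]

# One mel frame per hop: 22050 / 256 ≈ 86.1 frames per second,
# with n_mels=80 matching DiT's in_channels.
mel_fps = pp["sr"] / sp["hop_length"]
print(f"mel frame rate: {mel_fps:.1f} Hz, n_mels={sp['n_mels']}")

# The length regulator is configured for 80 Hz content tokens in and
# 80 Hz out (in_frame_rate / out_frame_rate under model_params).
lr = cfg["model_params"]["length_regulator"]
print(f"token frame rate: {lr['in_frame_rate']} Hz -> {lr['out_frame_rate']} Hz")

Note that the commit's only substantive change is filling in the two checkpoint paths (speech_tokenizer.path and style_encoder.campplus_path); everything else in the file is unchanged.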