log_dir: "./runs/run_dit_mel_seed_facodec_small"
save_freq: 1
log_interval: 10
save_interval: 1000
device: "cuda"
epochs: 1000 # number of epochs for first stage training (pre-training)
batch_size: 2
batch_length: 100 # maximum duration of audio in a batch (in seconds)
max_len: 80 # maximum number of frames
pretrained_model: ""
pretrained_encoder: ""
load_only_params: False # set to True if you do not want to load epoch numbers and optimizer parameters
F0_path: "modules/JDC/bst.t7"
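# dataset file lists and the root directory for audio paths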
data_params:
  train_data: "./data/train.txt"
  val_data: "./data/val.txt"
  root_path: "./data/"
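# audio preprocessing: sample rate and mel-spectrogram extraction settings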
preprocess_params:
  sr: 22050
  spect_params:
    n_fft: 1024
    win_length: 1024
    hop_length: 256
    n_mels: 80
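# model architecture settings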
model_params:
  dit_type: "DiT" # uDiT or DiT
  reg_loss_type: "l1" # l1 or l2
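  # discrete content tokenizer used to extract speech tokens (FAcodec ONNX checkpoint)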
  speech_tokenizer:
    type: 'facodec' # facodec or cosyvoice
    path: "checkpoints/speech_tokenizer_v1.onnx"
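  # speaker/style embedding extractor (CAMPPlus, 192-dim embeddings)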
  style_encoder:
    dim: 192
    campplus_path: "checkpoints/campplus_cn_common.bin"
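  # neural audio codec (DAC) encoder/decoder settings (24 kHz)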
  DAC:
    encoder_dim: 64
    encoder_rates: [2, 5, 5, 6]
    decoder_dim: 1536
    decoder_rates: [6, 5, 5, 2]
    sr: 24000
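  # length regulator mapping content tokens to frame-level features (token dropout acts as regularization)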
  length_regulator:
    channels: 512
    is_discrete: true
    content_codebook_size: 1024
    in_frame_rate: 80
    out_frame_rate: 80
    sampling_ratios: [1, 1, 1, 1]
    token_dropout_prob: 0.3 # probability of performing token dropout
    token_dropout_range: 1.0 # maximum percentage of tokens to drop out
    n_codebooks: 3
    quantizer_dropout: 0.5
    f0_condition: false
    n_f0_bins: 512
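  # diffusion transformer (DiT) backbone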
  DiT:
    hidden_dim: 512
    num_heads: 8
    depth: 13
    class_dropout_prob: 0.1
    block_size: 8192
    in_channels: 80
    style_condition: true
    final_layer_type: 'wavenet'
    target: 'mel' # mel or codec
    content_dim: 512
    content_codebook_size: 1024
    content_type: 'discrete'
    f0_condition: true
    n_f0_bins: 512
    content_codebooks: 1
    is_causal: false
    long_skip_connection: true
    zero_prompt_speech_token: false # for prompt component, do not input corresponding speech token
    time_as_token: false
    style_as_token: false
    uvit_skip_connection: true
    add_resblock_in_transformer: false
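  # WaveNet-style output module (used when final_layer_type is 'wavenet')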
  wavenet:
    hidden_dim: 512
    num_layers: 8
    kernel_size: 5
    dilation_rate: 1
    p_dropout: 0.2
    style_condition: true
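# base learning rate and loss weights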
loss_params:
  base_lr: 0.0001
  lambda_mel: 45
  lambda_kl: 1.0
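
A minimal sketch of how a config like this can be loaded and inspected with PyYAML; the file path and the load_config helper below are illustrative assumptions, not part of the repository's code.

    # Illustrative only: parse the YAML config into a nested dict and read a few fields.
    import yaml

    def load_config(path: str) -> dict:
        with open(path, "r") as f:
            return yaml.safe_load(f)

    cfg = load_config("configs/config_dit_mel_seed_facodec_small.yml")   # assumed path
    sr = cfg["preprocess_params"]["sr"]                                  # 22050
    n_mels = cfg["preprocess_params"]["spect_params"]["n_mels"]          # 80
    dit = cfg["model_params"]["DiT"]
    print(sr, n_mels, dit["hidden_dim"], dit["depth"])                   # 22050 80 512 13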