alp committed
Commit ffdf148
Parent: 3922ca3

Upload 2 files

ljspeech--hifigan_v2_config.json ADDED
@@ -0,0 +1,158 @@
+ {
+     "run_name": "hifigan",
+     "run_description": "universal hifigan trained on LibriTTS with no spectrogram normalization and using log() for scaling instead of log10()",
+
+
+     // AUDIO PARAMETERS
+     "audio": {
+         "fft_size": 1024,          // number of stft frequency levels. Size of the linear spectrogram frame.
+         "win_length": 1024,        // stft window length (in samples).
+         "hop_length": 256,         // stft window hop length (in samples).
+         "frame_length_ms": null,   // stft window length in ms. If null, 'win_length' is used.
+         "frame_shift_ms": null,    // stft window hop length in ms. If null, 'hop_length' is used.
+
+         // Audio processing parameters
+         "sample_rate": 22050,      // DATASET-RELATED: wav sample rate. If different from the original data, it is resampled.
+         "preemphasis": 0.0,        // pre-emphasis to reduce spec noise and make it more structured. If 0.0, no pre-emphasis is applied.
+         "ref_level_db": 20,        // reference level dB; theoretically 20 dB is the sound of air.
+         "log_func": "np.log",
+
+         // Silence trimming
+         "do_trim_silence": false,  // enable trimming of silence from audio as you load it. LJSpeech (false), TWEB (false), Nancy (true)
+         "trim_db": 60,             // threshold for trimming silence. Set this according to your dataset.
+
+         // MelSpectrogram parameters
+         "num_mels": 80,            // size of the mel spec frame.
+         "mel_fmin": 0.0,           // minimum freq level for mel-spec. ~50 for male and ~95 for female voices. Tune for dataset!!
+         "mel_fmax": 8000.0,        // maximum freq level for mel-spec. Tune for dataset!!
+         "spec_gain": 1.0,          // scaler value applied after log transform of spectrogram.
+
+         // Normalization parameters
+         "signal_norm": false,      // normalize spec values. Mean-var normalization if 'stats_path' is defined, otherwise range normalization defined by the other params.
+         "min_level_db": -100,      // lower bound for normalization
+         "symmetric_norm": true,    // move normalization to range [-1, 1]
+         "max_norm": 4.0,           // scale normalization to range [-max_norm, max_norm] or [0, max_norm]
+         "clip_norm": true,         // clip normalized values into the range.
+         "stats_path": null         // DO NOT USE WITH MULTI-SPEAKER MODEL. Scaler stats file computed by 'compute_statistics.py'. If it is defined, mean-std normalization is used and the other normalization params are ignored.
+     },
+
+     // DISTRIBUTED TRAINING
+     "distributed": {
+         "backend": "nccl",
+         "url": "tcp:\/\/localhost:54324"
+     },
+
+     // MODEL PARAMETERS
+     "use_pqmf": false,
+
+     // LOSS PARAMETERS
+     "use_stft_loss": false,
+     "use_subband_stft_loss": false,
+     "use_mse_gan_loss": true,
+     "use_hinge_gan_loss": false,
+     "use_feat_match_loss": true,   // use only with melgan discriminators
+     "use_l1_spec_loss": true,
+
+     // loss weights
+     "stft_loss_weight": 0,
+     "subband_stft_loss_weight": 0,
+     "mse_G_loss_weight": 1,
+     "hinge_G_loss_weight": 0,
+     "feat_match_loss_weight": 10,
+     "l1_spec_loss_weight": 45,
+
+     // multiscale stft loss parameters
+     // "stft_loss_params": {
+     //     "n_ffts": [1024, 2048, 512],
+     //     "hop_lengths": [120, 240, 50],
+     //     "win_lengths": [600, 1200, 240]
+     // },
+
+     "l1_spec_loss_params": {
+         "use_mel": true,
+         "sample_rate": 16000,
+         "n_fft": 1024,
+         "hop_length": 256,
+         "win_length": 1024,
+         "n_mels": 80,
+         "mel_fmin": 0.0,
+         "mel_fmax": null
+     },
+
+     "target_loss": "avg_G_loss",   // loss value used to pick the best model to save after each epoch
+
+     // DISCRIMINATOR
+     "discriminator_model": "hifigan_discriminator",
+     // "discriminator_model_params": {
+     //     "peroids": [2, 3, 5, 7, 11],
+     //     "base_channels": 16,
+     //     "max_channels": 512,
+     //     "downsample_factors": [4, 4, 4]
+     // },
+     "steps_to_start_discriminator": 0,  // steps required to start GAN training.
+
+     // GENERATOR
+     "generator_model": "hifigan_generator",
+     "generator_model_params": {
+         "resblock_type": "1",
+         "upsample_factors": [8, 8, 2, 2],
+         "upsample_kernel_sizes": [16, 16, 4, 4],
+         "upsample_initial_channel": 128,
+         "resblock_kernel_sizes": [3, 7, 11],
+         "resblock_dilation_sizes": [[1, 3, 5], [1, 3, 5], [1, 3, 5]]
+     },
+
+     // DATASET
+     "data_path": "/home/erogol/gdrive/Datasets/non-binary-voice-files/vo_voice_quality_transformation/",
+     "feature_path": null,
+     // "feature_path": "/home/erogol/gdrive/Datasets/non-binary-voice-files/tacotron-DCA/",
+     "seq_len": 8192,
+     "pad_short": 2000,
+     "conv_pad": 0,
+     "use_noise_augment": false,
+     "use_cache": true,
+     "reinit_layers": [],           // give a list of layer names to restore from the given checkpoint. If not defined, it reloads all heuristically matching layers.
+
+     // TRAINING
+     "batch_size": 16,              // Batch size for training. Values lower than 32 might make attention hard to learn. It is overwritten by 'gradual_training'.
+
+     // VALIDATION
+     "run_eval": true,
+     "test_delay_epochs": 10,       // Until attention is aligned, testing only wastes computation time.
+     "test_sentences_file": null,   // set a file to load sentences to be used for testing. If it is null, the default English sentences are used.
+
+     // OPTIMIZER
+     "epochs": 10000,               // total number of epochs to train.
+     "wd": 0.0,                     // Weight decay weight.
+     "gen_clip_grad": -1,           // Generator gradient clipping threshold. Gradient clipping is applied only if > 0.
+     "disc_clip_grad": -1,          // Discriminator gradient clipping threshold.
+     // "lr_scheduler_gen": "ExponentialLR",  // one of the schedulers from https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate
+     // "lr_scheduler_gen_params": {
+     //     "gamma": 0.999,
+     //     "last_epoch": -1
+     // },
+     // "lr_scheduler_disc": "ExponentialLR",  // one of the schedulers from https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate
+     // "lr_scheduler_disc_params": {
+     //     "gamma": 0.999,
+     //     "last_epoch": -1
+     // },
+     "lr_gen": 0.00001,             // Initial learning rate. If Noam decay is active, maximum learning rate.
+     "lr_disc": 0.00001,
+
+     // TENSORBOARD and LOGGING
+     "print_step": 25,              // Number of steps between logging training status on the console.
+     "print_eval": false,           // If true, it prints loss values for each step in the eval run.
+     "save_step": 25000,            // Number of training steps between plotting training stats on TB and saving model checkpoints.
+     "checkpoint": true,            // If true, it saves checkpoints every 'save_step' steps.
+     "tb_model_param_stats": false, // If true, plots param stats per layer on Tensorboard. Might be memory consuming, but good for debugging.
+
+     // DATA LOADING
+     "num_loader_workers": 8,       // number of training data loader processes. Don't set it too big; 4-8 are good values.
+     "num_val_loader_workers": 4,   // number of evaluation data loader processes.
+     "eval_split_size": 10,
+
+     // PATHS
+     "output_path": "/home/erogol/gdrive/Trainings/sam/"
+ }
+
+
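Two things are worth noting about this config before the next file: the product of "upsample_factors" (8 x 8 x 2 x 2 = 256) matches "hop_length", so the generator upsamples each mel frame into exactly one hop of audio samples; and the file is JSON with // comments, so json.loads() cannot parse it as-is. Below is a minimal, illustrative Python sketch (not part of the commit) that strips the comments before parsing; it assumes no string value contains a literal "//", which holds here because the slashes in the "url" value are escaped.

import json
import re

def load_commented_json(path):
    # Read a config that mixes JSON with // comments.
    with open(path, "r", encoding="utf-8") as f:
        text = f.read()
    # Drop everything from '//' to the end of each line, then parse.
    # Safe only because no string value in this file contains an
    # unescaped '//'.
    stripped = re.sub(r"//.*$", "", text, flags=re.MULTILINE)
    return json.loads(stripped)

config = load_commented_json("ljspeech--hifigan_v2_config.json")
print(config["audio"]["sample_rate"])                        # 22050
print(config["generator_model_params"]["upsample_factors"])  # [8, 8, 2, 2]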
ljspeech--hifigan_v2_model_file.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4047e93886faa1aba11948efa71f59dcb0ec9117e286660e59b91892ef98d129
+ size 3794153
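The .pth entry above is a Git LFS pointer rather than the model weights themselves; the actual 3,794,153-byte object is fetched through LFS and identified by its SHA-256 oid. As an illustrative sketch (assuming the real file has already been downloaded next to the script, and using only the Python standard library), a local copy can be checked against the pointer like this:

import hashlib
import os

# Values copied from the Git LFS pointer above.
EXPECTED_SHA256 = "4047e93886faa1aba11948efa71f59dcb0ec9117e286660e59b91892ef98d129"
EXPECTED_SIZE = 3794153  # bytes

def verify_lfs_object(path):
    # Compare the local file's size and SHA-256 digest with the pointer.
    if os.path.getsize(path) != EXPECTED_SIZE:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == EXPECTED_SHA256

print(verify_lfs_object("ljspeech--hifigan_v2_model_file.pth"))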