ForHHeart committed on
Commit ccb7f5d
1 Parent(s): 9ac3429

Upload SpeechT5HifiGan

Files changed (2)
  1. config.json +40 -83
  2. model.safetensors +2 -2
config.json CHANGED
@@ -1,92 +1,49 @@
 {
-  "_name_or_path": "ForHHeart/speecht5_finetuned_sawahili",
-  "activation_dropout": 0.1,
-  "apply_spec_augment": true,
+  "_name_or_path": "microsoft/speecht5_hifigan",
   "architectures": [
-    "SpeechT5ForTextToSpeech"
+    "SpeechT5HifiGan"
   ],
-  "attention_dropout": 0.1,
-  "bos_token_id": 0,
-  "conv_bias": false,
-  "conv_dim": [
-    512,
-    512,
-    512,
-    512,
-    512,
-    512,
-    512
+  "initializer_range": 0.01,
+  "leaky_relu_slope": 0.1,
+  "model_in_dim": 80,
+  "model_type": "hifigan",
+  "normalize_before": true,
+  "resblock_dilation_sizes": [
+    [
+      1,
+      3,
+      5
+    ],
+    [
+      1,
+      3,
+      5
+    ],
+    [
+      1,
+      3,
+      5
+    ]
   ],
-  "conv_kernel": [
-    10,
+  "resblock_kernel_sizes": [
     3,
-    3,
-    3,
-    3,
-    2,
-    2
+    7,
+    11
   ],
-  "conv_stride": [
-    5,
-    2,
-    2,
-    2,
-    2,
-    2,
-    2
-  ],
-  "decoder_attention_heads": 12,
-  "decoder_ffn_dim": 3072,
-  "decoder_layerdrop": 0.1,
-  "decoder_layers": 6,
-  "decoder_start_token_id": 2,
-  "encoder_attention_heads": 12,
-  "encoder_ffn_dim": 3072,
-  "encoder_layerdrop": 0.1,
-  "encoder_layers": 12,
-  "encoder_max_relative_position": 160,
-  "eos_token_id": 2,
-  "feat_extract_activation": "gelu",
-  "feat_extract_norm": "group",
-  "feat_proj_dropout": 0.0,
-  "guided_attention_loss_num_heads": 2,
-  "guided_attention_loss_scale": 10.0,
-  "guided_attention_loss_sigma": 0.4,
-  "hidden_act": "gelu",
-  "hidden_dropout": 0.1,
-  "hidden_size": 768,
-  "initializer_range": 0.02,
-  "is_encoder_decoder": true,
-  "layer_norm_eps": 1e-05,
-  "mask_feature_length": 10,
-  "mask_feature_min_masks": 0,
-  "mask_feature_prob": 0.0,
-  "mask_time_length": 10,
-  "mask_time_min_masks": 2,
-  "mask_time_prob": 0.05,
-  "max_length": 1876,
-  "max_speech_positions": 1876,
-  "max_text_positions": 600,
-  "model_type": "speecht5",
-  "num_conv_pos_embedding_groups": 16,
-  "num_conv_pos_embeddings": 128,
-  "num_feat_extract_layers": 7,
-  "num_mel_bins": 80,
-  "pad_token_id": 1,
-  "positional_dropout": 0.1,
-  "reduction_factor": 2,
-  "scale_embedding": false,
-  "speaker_embedding_dim": 512,
-  "speech_decoder_postnet_dropout": 0.5,
-  "speech_decoder_postnet_kernel": 5,
-  "speech_decoder_postnet_layers": 5,
-  "speech_decoder_postnet_units": 256,
-  "speech_decoder_prenet_dropout": 0.5,
-  "speech_decoder_prenet_layers": 2,
-  "speech_decoder_prenet_units": 256,
+  "sampling_rate": 16000,
   "torch_dtype": "float32",
   "transformers_version": "4.40.0",
-  "use_cache": true,
-  "use_guided_attention_loss": true,
-  "vocab_size": 81
+  "upsample_initial_channel": 512,
+  "upsample_kernel_sizes": [
+    8,
+    8,
+    8,
+    8
+  ],
+  "upsample_rates": [
+    4,
+    4,
+    4,
+    4
+  ]
 }
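In short, this commit swaps the repository's config.json from the fine-tuned SpeechT5 text-to-speech configuration to the SpeechT5 HiFi-GAN vocoder configuration (80 mel bins in, 16 kHz waveform out). Below is a minimal usage sketch, not part of the commit; the repo ids are assumptions read off the `_name_or_path` fields in the two configs shown above, so substitute the actual Hub ids if they differ.

```python
import torch
from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan

# Repo ids below are assumptions taken from the "_name_or_path" fields in this diff.
processor = SpeechT5Processor.from_pretrained("ForHHeart/speecht5_finetuned_sawahili")
tts = SpeechT5ForTextToSpeech.from_pretrained("ForHHeart/speecht5_finetuned_sawahili")
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

inputs = processor(text="Habari ya asubuhi", return_tensors="pt")

# The TTS config declares speaker_embedding_dim = 512; a real x-vector
# (e.g. from an x-vector speaker-verification model) should replace this dummy.
speaker_embeddings = torch.zeros(1, 512)

# With a vocoder passed, generate_speech returns a waveform at the vocoder's
# 16 kHz sampling rate instead of a raw 80-bin log-mel spectrogram.
speech = tts.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)
print(speech.shape)  # 1-D waveform tensor
```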
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:17cb8139940fda044376037a5c59f130688e1fbd88589f2ac9ef58366609200b
-size 577789320
+oid sha256:5e1ae998705b24b74b79b55bd9015f458ee85dbc1e98448aa7f0e0e066eba1bd
+size 50640724
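The LFS pointer swap above replaces the ~578 MB SpeechT5ForTextToSpeech weights with the ~51 MB HiFi-GAN weights. A small verification sketch, assuming the file has been downloaded locally as model.safetensors (the local path is an assumption): it recomputes the SHA-256 and size and compares them against the new pointer.

```python
import hashlib
import os

# Values copied from the new LFS pointer in this diff.
EXPECTED_OID = "5e1ae998705b24b74b79b55bd9015f458ee85dbc1e98448aa7f0e0e066eba1bd"
EXPECTED_SIZE = 50640724  # bytes

path = "model.safetensors"  # assumed local download path

# Stream the file through SHA-256 to avoid loading it all into memory.
sha256 = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)

assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch with LFS pointer"
assert sha256.hexdigest() == EXPECTED_OID, "sha256 mismatch with LFS pointer"
print("model.safetensors matches the LFS pointer")
```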