Upload folder using huggingface_hub

#1
by ylacombe (HF staff) - opened
Files changed (2)
  1. config.json +258 -0
  2. pytorch_model.bin +3 -0
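
For reference, a PR like this one can be opened programmatically with `huggingface_hub`. The sketch below is a minimal example of that workflow; the local folder path and repo id are placeholder assumptions, not values taken from this PR:

```python
from huggingface_hub import HfApi

api = HfApi()

# Upload every file in the local folder and open the change as a pull request.
# "./bark-checkpoint" and "suno/bark" are placeholder assumptions.
api.upload_folder(
    folder_path="./bark-checkpoint",
    repo_id="suno/bark",
    repo_type="model",
    create_pr=True,
    commit_message="Upload folder using huggingface_hub",
)
```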
config.json ADDED
@@ -0,0 +1,258 @@
+{
+  "_commit_hash": null,
+  "architectures": [
+    "BarkModel"
+  ],
+  "coarse_acoustics_config": {
+    "_name_or_path": "",
+    "add_cross_attention": false,
+    "architectures": [
+      "BarkCoarseAcousticsModule"
+    ],
+    "bad_words_ids": null,
+    "begin_suppress_tokens": null,
+    "bias": false,
+    "block_size": 1024,
+    "bos_token_id": null,
+    "chunk_size_feed_forward": 0,
+    "cross_attention_hidden_size": null,
+    "decoder_start_token_id": null,
+    "diversity_penalty": 0.0,
+    "do_sample": false,
+    "dropout": 0.0,
+    "early_stopping": false,
+    "encoder_no_repeat_ngram_size": 0,
+    "eos_token_id": null,
+    "exponential_decay_length_penalty": null,
+    "finetuning_task": null,
+    "forced_bos_token_id": null,
+    "forced_eos_token_id": null,
+    "hidden_size": 1024,
+    "id2label": {
+      "0": "LABEL_0",
+      "1": "LABEL_1"
+    },
+    "initializer_range": 0.02,
+    "input_vocab_size": 12096,
+    "is_decoder": false,
+    "is_encoder_decoder": false,
+    "label2id": {
+      "LABEL_0": 0,
+      "LABEL_1": 1
+    },
+    "length_penalty": 1.0,
+    "max_length": 20,
+    "min_length": 0,
+    "model_type": "coarse_acoustics",
+    "n_codes_given": 1,
+    "n_codes_total": 8,
+    "no_repeat_ngram_size": 0,
+    "num_beam_groups": 1,
+    "num_beams": 1,
+    "num_heads": 16,
+    "num_layers": 24,
+    "num_return_sequences": 1,
+    "output_attentions": false,
+    "output_hidden_states": false,
+    "output_scores": false,
+    "output_vocab_size": 12096,
+    "pad_token_id": null,
+    "prefix": null,
+    "problem_type": null,
+    "pruned_heads": {},
+    "remove_invalid_values": false,
+    "repetition_penalty": 1.0,
+    "return_dict": true,
+    "return_dict_in_generate": false,
+    "sep_token_id": null,
+    "suppress_tokens": null,
+    "task_specific_params": null,
+    "temperature": 1.0,
+    "tf_legacy_loss": false,
+    "tie_encoder_decoder": false,
+    "tie_word_embeddings": true,
+    "tokenizer_class": null,
+    "top_k": 50,
+    "top_p": 1.0,
+    "torch_dtype": "float32",
+    "torchscript": false,
+    "transformers_version": "4.31.0.dev0",
+    "typical_p": 1.0,
+    "use_bfloat16": false,
+    "use_cache": true
+  },
+  "coarse_infer_token": 12050,
+  "coarse_rate_hz": 75,
+  "coarse_semantic_pad_token": 12048,
+  "codebook_size": 1024,
+  "context_window_size": 1024,
+  "fine_acoustics_config": {
+    "_name_or_path": "",
+    "add_cross_attention": false,
+    "architectures": [
+      "BarkFineAcousticsModule"
+    ],
+    "bad_words_ids": null,
+    "begin_suppress_tokens": null,
+    "bias": false,
+    "block_size": 1024,
+    "bos_token_id": null,
+    "chunk_size_feed_forward": 0,
+    "cross_attention_hidden_size": null,
+    "decoder_start_token_id": null,
+    "diversity_penalty": 0.0,
+    "do_sample": false,
+    "dropout": 0.0,
+    "early_stopping": false,
+    "encoder_no_repeat_ngram_size": 0,
+    "eos_token_id": null,
+    "exponential_decay_length_penalty": null,
+    "finetuning_task": null,
+    "forced_bos_token_id": null,
+    "forced_eos_token_id": null,
+    "hidden_size": 1024,
+    "id2label": {
+      "0": "LABEL_0",
+      "1": "LABEL_1"
+    },
+    "initializer_range": 0.02,
+    "input_vocab_size": 1056,
+    "is_decoder": false,
+    "is_encoder_decoder": false,
+    "label2id": {
+      "LABEL_0": 0,
+      "LABEL_1": 1
+    },
+    "length_penalty": 1.0,
+    "max_length": 20,
+    "min_length": 0,
+    "model_type": "fine_acoustics",
+    "n_codes_given": 1,
+    "n_codes_total": 8,
+    "no_repeat_ngram_size": 0,
+    "num_beam_groups": 1,
+    "num_beams": 1,
+    "num_heads": 16,
+    "num_layers": 24,
+    "num_return_sequences": 1,
+    "output_attentions": false,
+    "output_hidden_states": false,
+    "output_scores": false,
+    "output_vocab_size": 1056,
+    "pad_token_id": null,
+    "prefix": null,
+    "problem_type": null,
+    "pruned_heads": {},
+    "remove_invalid_values": false,
+    "repetition_penalty": 1.0,
+    "return_dict": true,
+    "return_dict_in_generate": false,
+    "sep_token_id": null,
+    "suppress_tokens": null,
+    "task_specific_params": null,
+    "temperature": 1.0,
+    "tf_legacy_loss": false,
+    "tie_encoder_decoder": false,
+    "tie_word_embeddings": true,
+    "tokenizer_class": null,
+    "top_k": 50,
+    "top_p": 1.0,
+    "torch_dtype": "float32",
+    "torchscript": false,
+    "transformers_version": "4.31.0.dev0",
+    "typical_p": 1.0,
+    "use_bfloat16": false,
+    "use_cache": true
+  },
+  "model_type": "bark",
+  "n_coarse_codebooks": 2,
+  "n_fine_codebooks": 8,
+  "pretrained_encodec_name_or_path": "facebook/encodec_24khz",
+  "sample_rate": 24000,
+  "semantic_config": {
+    "_name_or_path": "",
+    "add_cross_attention": false,
+    "architectures": [
+      "BarkSemanticModule"
+    ],
+    "bad_words_ids": null,
+    "begin_suppress_tokens": null,
+    "bias": false,
+    "block_size": 1024,
+    "bos_token_id": null,
+    "chunk_size_feed_forward": 0,
+    "cross_attention_hidden_size": null,
+    "decoder_start_token_id": null,
+    "diversity_penalty": 0.0,
+    "do_sample": false,
+    "dropout": 0.0,
+    "early_stopping": false,
+    "encoder_no_repeat_ngram_size": 0,
+    "eos_token_id": null,
+    "exponential_decay_length_penalty": null,
+    "finetuning_task": null,
+    "forced_bos_token_id": null,
+    "forced_eos_token_id": null,
+    "hidden_size": 1024,
+    "id2label": {
+      "0": "LABEL_0",
+      "1": "LABEL_1"
+    },
+    "initializer_range": 0.02,
+    "input_vocab_size": 129600,
+    "is_decoder": false,
+    "is_encoder_decoder": false,
+    "label2id": {
+      "LABEL_0": 0,
+      "LABEL_1": 1
+    },
+    "length_penalty": 1.0,
+    "max_length": 20,
+    "min_length": 0,
+    "model_type": "semantic",
+    "n_codes_given": 1,
+    "n_codes_total": 8,
+    "no_repeat_ngram_size": 0,
+    "num_beam_groups": 1,
+    "num_beams": 1,
+    "num_heads": 16,
+    "num_layers": 24,
+    "num_return_sequences": 1,
+    "output_attentions": false,
+    "output_hidden_states": false,
+    "output_scores": false,
+    "output_vocab_size": 10048,
+    "pad_token_id": null,
+    "prefix": null,
+    "problem_type": null,
+    "pruned_heads": {},
+    "remove_invalid_values": false,
+    "repetition_penalty": 1.0,
+    "return_dict": true,
+    "return_dict_in_generate": false,
+    "sep_token_id": null,
+    "suppress_tokens": null,
+    "task_specific_params": null,
+    "temperature": 1.0,
+    "tf_legacy_loss": false,
+    "tie_encoder_decoder": false,
+    "tie_word_embeddings": true,
+    "tokenizer_class": null,
+    "top_k": 50,
+    "top_p": 1.0,
+    "torch_dtype": "float32",
+    "torchscript": false,
+    "transformers_version": "4.31.0.dev0",
+    "typical_p": 1.0,
+    "use_bfloat16": false,
+    "use_cache": true
+  },
+  "semantic_infer_token": 129599,
+  "semantic_pad_token": 10000,
+  "semantic_rate_hz": 49.9,
+  "semantic_vocab_size": 10000,
+  "text_encoding_offset": 10048,
+  "text_pad_token": 129595,
+  "torch_dtype": "float32",
+  "transformers_version": null
+}
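
Once this PR is merged, the new config.json can be fetched and inspected directly. A minimal sketch, assuming the repo id `suno/bark` as a placeholder for whichever repo this PR targets:

```python
import json

from huggingface_hub import hf_hub_download

# Repo id is a placeholder assumption; use the repo this PR belongs to.
path = hf_hub_download(repo_id="suno/bark", filename="config.json")
with open(path) as f:
    cfg = json.load(f)

# A few of the top-level Bark fields introduced by this file.
print(cfg["model_type"])                     # bark
print(cfg["sample_rate"])                    # 24000
print(cfg["n_coarse_codebooks"])             # 2
print(cfg["semantic_config"]["num_layers"])  # 24
```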
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf76aa6b57704035ad1f3fbe4e97a3a9c9dd38120fc80e3c8eb6335e3a8e3561
+size 8880129315
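
The pytorch_model.bin entry above is only the Git LFS pointer; the actual weights (roughly 8.9 GB, per the `size` field) live in LFS storage. As a sanity check, a downloaded copy can be verified against the sha256 recorded in the pointer; the repo id below is again a placeholder assumption:

```python
import hashlib

from huggingface_hub import hf_hub_download

EXPECTED_SHA256 = "cf76aa6b57704035ad1f3fbe4e97a3a9c9dd38120fc80e3c8eb6335e3a8e3561"

# hf_hub_download resolves the LFS pointer and fetches the real weights file.
weights_path = hf_hub_download(repo_id="suno/bark", filename="pytorch_model.bin")

# Hash the file in 1 MiB chunks and compare against the pointer's oid.
h = hashlib.sha256()
with open(weights_path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert h.hexdigest() == EXPECTED_SHA256, "checksum mismatch"
```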