Commit 2b9efa4, committed by Xenova (HF staff)
1 parent: d2b62f4

Upload folder using huggingface_hub

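The commit message says the files were pushed with huggingface_hub. A minimal sketch of such a push is below; the repo id and local folder path are illustrative placeholders, not values recorded in this commit.

```python
# Sketch: pushing a local export folder to the Hub with huggingface_hub.
# repo_id and folder_path are hypothetical placeholders.
from huggingface_hub import upload_folder

upload_folder(
    repo_id="your-username/tiny-random-vits-onnx",   # hypothetical target repo
    folder_path="./tiny-random-vits-onnx",           # local folder containing the files below
    commit_message="Upload folder using huggingface_hub",
)
```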
added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "<unk>": 38
+ }
config.json ADDED
@@ -0,0 +1,82 @@
+ {
+   "_name_or_path": "echarlaix/tiny-random-vits",
+   "activation_dropout": 0.1,
+   "architectures": [
+     "VitsModel"
+   ],
+   "attention_dropout": 0.1,
+   "depth_separable_channels": 2,
+   "depth_separable_num_layers": 3,
+   "duration_predictor_dropout": 0.5,
+   "duration_predictor_filter_channels": 16,
+   "duration_predictor_flow_bins": 10,
+   "duration_predictor_kernel_size": 3,
+   "duration_predictor_num_flows": 2,
+   "duration_predictor_tail_bound": 5.0,
+   "ffn_dim": 64,
+   "ffn_kernel_size": 3,
+   "flow_size": 16,
+   "hidden_act": "relu",
+   "hidden_dropout": 0.1,
+   "hidden_size": 16,
+   "initializer_range": 0.02,
+   "layer_norm_eps": 1e-05,
+   "layerdrop": 0.1,
+   "leaky_relu_slope": 0.1,
+   "model_type": "vits",
+   "noise_scale": 0.667,
+   "noise_scale_duration": 0.8,
+   "num_attention_heads": 2,
+   "num_hidden_layers": 2,
+   "num_speakers": 1,
+   "posterior_encoder_num_wavenet_layers": 2,
+   "prior_encoder_num_flows": 2,
+   "prior_encoder_num_wavenet_layers": 4,
+   "resblock_dilation_sizes": [
+     [
+       1,
+       3,
+       5
+     ],
+     [
+       1,
+       3,
+       5
+     ],
+     [
+       1,
+       3,
+       5
+     ]
+   ],
+   "resblock_kernel_sizes": [
+     3,
+     7,
+     11
+   ],
+   "sampling_rate": 16000,
+   "speaker_embedding_size": 0,
+   "speaking_rate": 1.0,
+   "spectrogram_bins": 8,
+   "transformers_version": "4.37.0.dev0",
+   "upsample_initial_channel": 16,
+   "upsample_kernel_sizes": [
+     16,
+     16,
+     4,
+     4
+   ],
+   "upsample_rates": [
+     8,
+     8,
+     2,
+     2
+   ],
+   "use_bias": true,
+   "use_stochastic_duration_prediction": false,
+   "vocab_size": 38,
+   "wavenet_dilation_rate": 1,
+   "wavenet_dropout": 0.0,
+   "wavenet_kernel_size": 5,
+   "window_size": 4
+ }
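config.json describes a tiny, randomly initialized VITS model (hidden_size 16, 2 layers, vocab_size 38, 16 kHz output) derived from echarlaix/tiny-random-vits. As a rough sketch, the PyTorch checkpoint named in `_name_or_path` could be inspected with transformers; loading that repo is an assumption about its availability, not something shown in this commit.

```python
# Sketch: inspecting the VITS config and the PyTorch model it describes.
# Assumes the repo named in "_name_or_path" (echarlaix/tiny-random-vits) is accessible.
from transformers import VitsConfig, VitsModel

config = VitsConfig.from_pretrained("echarlaix/tiny-random-vits")
print(config.hidden_size, config.vocab_size, config.sampling_rate)  # 16, 38, 16000

model = VitsModel.from_pretrained("echarlaix/tiny-random-vits")
print(sum(p.numel() for p in model.parameters()))  # tiny parameter count, random weights
```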
onnx/model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:83a71a4fecef66c8c69ad9429a2e5c7ff8c887a322ab4f78fb76e8da022162f0
+ size 523831
onnx/model_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8071190dfa6e7d4374ca632d151e9538c41b8ac7dca88e92595f86b5f66b1eee
+ size 644458
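Both ONNX files are committed as Git LFS pointers; the actual graphs (roughly 0.5 MB and 0.6 MB) live in LFS storage. A hedged sketch of running the exported model with onnxruntime follows; the input and output names are assumptions about the usual VITS export convention and are not recorded in the pointer files, which is why the sketch prints the real names first.

```python
# Sketch: running the exported graph with onnxruntime.
# Input/output names (input_ids, attention_mask, waveform) are assumptions about
# the export convention, not recorded in the LFS pointer files above.
import numpy as np
import onnxruntime as ort

session = ort.InferenceSession("onnx/model_quantized.onnx")
print([i.name for i in session.get_inputs()], [o.name for o in session.get_outputs()])

input_ids = np.array([[0, 6, 0, 7, 0]], dtype=np.int64)   # toy token ids
attention_mask = np.ones_like(input_ids)
outputs = session.run(None, {"input_ids": input_ids, "attention_mask": attention_mask})
waveform = outputs[0]                                     # float32 audio at 16 kHz per config.json
```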
quantize_config.json ADDED
@@ -0,0 +1,53 @@
+ {
+   "per_channel": true,
+   "reduce_range": true,
+   "per_model_config": {
+     "model": {
+       "op_types": [
+         "ReduceMax",
+         "RandomNormalLike",
+         "CumSum",
+         "Add",
+         "Relu",
+         "Split",
+         "Identity",
+         "Cast",
+         "Neg",
+         "Expand",
+         "Pow",
+         "MatMul",
+         "Tanh",
+         "LeakyRelu",
+         "Gather",
+         "Shape",
+         "Constant",
+         "Sqrt",
+         "Equal",
+         "ReduceMean",
+         "Transpose",
+         "Concat",
+         "ConvTranspose",
+         "Range",
+         "Sub",
+         "Clip",
+         "Squeeze",
+         "Reshape",
+         "Slice",
+         "Div",
+         "Conv",
+         "Sigmoid",
+         "Mul",
+         "ConstantOfShape",
+         "Pad",
+         "Where",
+         "Ceil",
+         "ReduceSum",
+         "Less",
+         "Unsqueeze",
+         "Exp",
+         "Softmax"
+       ],
+       "weight_type": "QUInt8"
+     }
+   }
+ }
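quantize_config.json records the settings used to produce onnx/model_quantized.onnx: per-channel, reduced-range dynamic quantization to unsigned 8-bit weights, with the op types present in the graph listed above. A sketch of an equivalent call to onnxruntime's dynamic quantizer follows; the original conversion script is not part of this commit, so this only approximates the recorded settings.

```python
# Sketch: dynamic quantization with settings matching quantize_config.json.
# Approximates the recorded parameters; argument availability can vary across
# onnxruntime versions.
from onnxruntime.quantization import QuantType, quantize_dynamic

quantize_dynamic(
    model_input="onnx/model.onnx",
    model_output="onnx/model_quantized.onnx",
    per_channel=True,              # "per_channel": true
    reduce_range=True,             # "reduce_range": true
    weight_type=QuantType.QUInt8,  # "weight_type": "QUInt8"
)
```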
special_tokens_map.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "pad_token": {
+     "content": "k",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "add_blank": true,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "k",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "38": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "is_uroman": false,
+   "language": "eng",
+   "model_max_length": 4096,
+   "normalize": true,
+   "pad_token": "k",
+   "phonemize": false,
+   "tokenizer_class": "VitsTokenizer",
+   "unk_token": "<unk>"
+ }
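tokenizer_config.json selects VitsTokenizer with character-level tokenization: lowercase normalization, no phonemization, and add_blank, which interleaves the pad token "k" (id 0) between characters. A brief sketch of what that produces, assuming the tokenizer files in this commit are loaded from a local checkout of the repo:

```python
# Sketch: character-level VITS tokenization with add_blank interleaving.
# "." stands for a local checkout of this repo; any path containing
# vocab.json / tokenizer_config.json would do.
from transformers import VitsTokenizer

tokenizer = VitsTokenizer.from_pretrained(".")
enc = tokenizer("Hello world")   # normalized to lowercase characters
print(enc["input_ids"])
# With add_blank=true, the pad id 0 is interleaved between character ids,
# so "he" becomes roughly [0, id('h'), 0, id('e'), 0].
```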
vocab.json ADDED
@@ -0,0 +1,40 @@
+ {
+   " ": 19,
+   "'": 1,
+   "-": 14,
+   "0": 23,
+   "1": 15,
+   "2": 28,
+   "3": 11,
+   "4": 27,
+   "5": 35,
+   "6": 36,
+   "_": 30,
+   "a": 26,
+   "b": 24,
+   "c": 12,
+   "d": 5,
+   "e": 7,
+   "f": 20,
+   "g": 37,
+   "h": 6,
+   "i": 18,
+   "j": 16,
+   "k": 0,
+   "l": 21,
+   "m": 17,
+   "n": 29,
+   "o": 22,
+   "p": 13,
+   "q": 34,
+   "r": 25,
+   "s": 8,
+   "t": 33,
+   "u": 4,
+   "v": 32,
+   "w": 9,
+   "x": 31,
+   "y": 3,
+   "z": 2,
+   "–": 10
+ }
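vocab.json holds the 38 base character-to-id entries (including the en dash), while <unk> is added separately at id 38 via added_tokens.json, matching vocab_size 38 in config.json. For reference, a small stand-alone sketch of mapping raw text onto these ids, with unknown characters falling back to <unk>:

```python
# Sketch: manual character-to-id lookup using vocab.json and added_tokens.json,
# with characters missing from the vocab mapped to <unk> (id 38).
import json

with open("vocab.json") as f:
    vocab = json.load(f)
with open("added_tokens.json") as f:
    vocab.update(json.load(f))   # adds "<unk>": 38

unk_id = vocab["<unk>"]
text = "hello – 42!"             # "!" is not in the vocab
ids = [vocab.get(ch, unk_id) for ch in text.lower()]
print(ids)
```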