hans00 committed
Commit 6c14801
1 Parent(s): 8e90459

Upload folder using huggingface_hub

added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "<unk>": 53
+ }
config.json ADDED
@@ -0,0 +1,82 @@
+ {
+   "_name_or_path": "./vits-cmn",
+   "activation_dropout": 0.1,
+   "architectures": [
+     "VitsModel"
+   ],
+   "attention_dropout": 0.1,
+   "depth_separable_channels": 2,
+   "depth_separable_num_layers": 3,
+   "duration_predictor_dropout": 0.5,
+   "duration_predictor_filter_channels": 256,
+   "duration_predictor_flow_bins": 10,
+   "duration_predictor_kernel_size": 3,
+   "duration_predictor_num_flows": 4,
+   "duration_predictor_tail_bound": 5.0,
+   "ffn_dim": 768,
+   "ffn_kernel_size": 3,
+   "flow_size": 192,
+   "hidden_act": "relu",
+   "hidden_dropout": 0.1,
+   "hidden_size": 192,
+   "initializer_range": 0.02,
+   "layer_norm_eps": 1e-05,
+   "layerdrop": 0.1,
+   "leaky_relu_slope": 0.1,
+   "model_type": "vits",
+   "noise_scale": 0.667,
+   "noise_scale_duration": 0.8,
+   "num_attention_heads": 2,
+   "num_hidden_layers": 6,
+   "num_speakers": 44,
+   "posterior_encoder_num_wavenet_layers": 16,
+   "prior_encoder_num_flows": 4,
+   "prior_encoder_num_wavenet_layers": 4,
+   "resblock_dilation_sizes": [
+     [
+       1,
+       3,
+       5
+     ],
+     [
+       1,
+       3,
+       5
+     ],
+     [
+       1,
+       3,
+       5
+     ]
+   ],
+   "resblock_kernel_sizes": [
+     3,
+     7,
+     11
+   ],
+   "sampling_rate": 16000,
+   "speaker_embedding_size": 256,
+   "speaking_rate": 1.0,
+   "spectrogram_bins": 513,
+   "transformers_version": "4.36.2",
+   "upsample_initial_channel": 512,
+   "upsample_kernel_sizes": [
+     16,
+     16,
+     4,
+     4
+   ],
+   "upsample_rates": [
+     8,
+     8,
+     2,
+     2
+   ],
+   "use_bias": true,
+   "use_stochastic_duration_prediction": true,
+   "vocab_size": 53,
+   "wavenet_dilation_rate": 1,
+   "wavenet_dropout": 0.0,
+   "wavenet_kernel_size": 5,
+   "window_size": 4
+ }
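
config.json describes a 44-speaker VITS text-to-speech model for Mandarin (cmn) that produces 16 kHz audio. A minimal sketch of loading the uploaded folder with transformers follows; the local path, example text, and speaker id are placeholders, not values taken from this commit.

# Minimal sketch (path, input text and speaker id are assumptions).
import torch
from transformers import AutoTokenizer, VitsModel

model = VitsModel.from_pretrained("./vits-cmn-upload")          # config.json + model.safetensors
tokenizer = AutoTokenizer.from_pretrained("./vits-cmn-upload")  # tokenizer_config.json + vocab.json

inputs = tokenizer("nǐ hǎo", return_tensors="pt")  # lowercase pinyin with tone marks (see vocab.json)
with torch.no_grad():
    output = model(**inputs, speaker_id=0)  # "num_speakers": 44, so valid ids are 0-43
waveform = output.waveform[0]               # mono audio at "sampling_rate": 16000
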
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6f64defc393b53a54a1ad923c457f87e88179fc90a0186a958b27b5a2c6fb409
+ size 158691200
onnx/model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5d6cf71c28ac68d5943750626fd73cdc3a3ae5ab761a919bfe061a542f0c2688
+ size 114225562
onnx/model_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:179b0f4aa84e99b0667a759cce5ba974c622f0045e8e0bd0e7adb669cabab785
+ size 38343972
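
Both ONNX exports are stored as Git LFS objects, so only their pointers appear in the diff. A hedged sketch for inspecting the quantized graph locally with onnxruntime, to confirm its exact input and output tensor names (which are not visible in this commit):

# Sketch: list the graph's inputs/outputs with onnxruntime before wiring it up.
import onnxruntime as ort

session = ort.InferenceSession("onnx/model_quantized.onnx")
for tensor in session.get_inputs():
    print("input: ", tensor.name, tensor.shape, tensor.type)
for tensor in session.get_outputs():
    print("output:", tensor.name, tensor.shape, tensor.type)
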
quantize_config.json ADDED
@@ -0,0 +1,62 @@
+ {
+   "per_channel": true,
+   "reduce_range": true,
+   "per_model_config": {
+     "model": {
+       "op_types": [
+         "Softmax",
+         "Split",
+         "Concat",
+         "Transpose",
+         "Pow",
+         "Relu",
+         "Div",
+         "Softplus",
+         "ConstantOfShape",
+         "Cast",
+         "Sub",
+         "Expand",
+         "LeakyRelu",
+         "MatMul",
+         "ReduceSum",
+         "Exp",
+         "Sqrt",
+         "Not",
+         "GatherND",
+         "GatherElements",
+         "Add",
+         "Equal",
+         "Where",
+         "Tanh",
+         "And",
+         "LessOrEqual",
+         "Erf",
+         "ScatterND",
+         "Squeeze",
+         "CumSum",
+         "ConvTranspose",
+         "Gather",
+         "Mul",
+         "Less",
+         "Ceil",
+         "Pad",
+         "Clip",
+         "Shape",
+         "Reshape",
+         "LayerNormalization",
+         "Conv",
+         "RandomNormalLike",
+         "Unsqueeze",
+         "ReduceMax",
+         "Sigmoid",
+         "Constant",
+         "Range",
+         "Neg",
+         "Slice",
+         "GreaterOrEqual",
+         "NonZero"
+       ],
+       "weight_type": "QUInt8"
+     }
+   }
+ }
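
quantize_config.json records how onnx/model_quantized.onnx was produced: dynamic quantization to unsigned 8-bit weights, per channel, with reduced range, over the listed op types. Roughly the same recipe can be expressed with onnxruntime's quantization API; the sketch below is illustrative, not the exact command used for this commit.

# Sketch mirroring quantize_config.json (assumed recipe, not recorded in the diff).
from onnxruntime.quantization import QuantType, quantize_dynamic

quantize_dynamic(
    model_input="onnx/model.onnx",
    model_output="onnx/model_quantized.onnx",
    per_channel=True,              # "per_channel": true
    reduce_range=True,             # "reduce_range": true
    weight_type=QuantType.QUInt8,  # "weight_type": "QUInt8"
)
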
special_tokens_map.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "pad_token": {
+     "content": "_",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
@@ -0,0 +1,111 @@
+ {
+   "version": "1.0",
+   "truncation": null,
+   "padding": null,
+   "added_tokens": [
+     {
+       "id": 53,
+       "content": "<unk>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     }
+   ],
+   "normalizer": {
+     "type": "Sequence",
+     "normalizers": [
+       {
+         "type": "Lowercase"
+       },
+       {
+         "type": "Replace",
+         "pattern": {
+           "Regex": "[^_ abcdefghijklmnopqrstuwxyz\u00e0\u00e1\u00e8\u00e9\u00ec\u00ed\u00f2\u00f3\u00f9\u00fa\u00fc\u0101\u0113\u011b\u012b\u0144\u014d\u016b\u01ce\u01d0\u01d2\u01d4\u01d8\u01da\u01dc\u1e3f]"
+         },
+         "content": ""
+       },
+       {
+         "type": "Strip",
+         "strip_left": true,
+         "strip_right": true
+       },
+       {
+         "type": "Replace",
+         "pattern": {
+           "Regex": "(?=.)|(?<!^)$"
+         },
+         "content": "_"
+       }
+     ]
+   },
+   "pre_tokenizer": {
+     "type": "Split",
+     "pattern": {
+       "Regex": ""
+     },
+     "behavior": "Isolated",
+     "invert": false
+   },
+   "post_processor": null,
+   "decoder": null,
+   "model": {
+     "vocab": {
+       "_": 0,
+       " ": 1,
+       "a": 2,
+       "b": 3,
+       "c": 4,
+       "d": 5,
+       "e": 6,
+       "f": 7,
+       "g": 8,
+       "h": 9,
+       "i": 10,
+       "j": 11,
+       "k": 12,
+       "l": 13,
+       "m": 14,
+       "n": 15,
+       "o": 16,
+       "p": 17,
+       "q": 18,
+       "r": 19,
+       "s": 20,
+       "t": 21,
+       "u": 22,
+       "w": 23,
+       "x": 24,
+       "y": 25,
+       "z": 26,
+       "\u00e0": 27,
+       "\u00e1": 28,
+       "\u00e8": 29,
+       "\u00e9": 30,
+       "\u00ec": 31,
+       "\u00ed": 32,
+       "\u00f2": 33,
+       "\u00f3": 34,
+       "\u00f9": 35,
+       "\u00fa": 36,
+       "\u00fc": 37,
+       "\u0101": 38,
+       "\u0113": 39,
+       "\u011b": 40,
+       "\u012b": 41,
+       "\u0144": 42,
+       "\u014d": 43,
+       "\u016b": 44,
+       "\u01ce": 45,
+       "\u01d0": 46,
+       "\u01d2": 47,
+       "\u01d4": 48,
+       "\u01d8": 49,
+       "\u01da": 50,
+       "\u01dc": 51,
+       "\u1e3f": 52,
+       "<unk>": 53
+     }
+   }
+ }
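
The normalizer chain above lowercases the input, deletes any character outside the pinyin alphabet, strips surrounding whitespace, and then interleaves the pad token "_" between the remaining characters via the zero-width "(?=.)|(?<!^)$" replace (the behaviour that "add_blank": true refers to in tokenizer_config.json below). A plain-Python sketch of the same chain, for illustration only:

# Sketch: reproduce the four normalizer steps above with the standard library.
import re

ALPHABET = "_ abcdefghijklmnopqrstuwxyzàáèéìíòóùúüāēěīńōūǎǐǒǔǘǚǜḿ"

def normalize(text: str) -> str:
    text = text.lower()                               # Lowercase
    text = "".join(c for c in text if c in ALPHABET)  # Replace: drop out-of-alphabet characters
    text = text.strip()                               # Strip
    return re.sub(r"(?=.)|(?<!^)$", "_", text)        # Replace: interleave "_"

print(normalize("Nǐ hǎo!"))  # -> "_n_ǐ_ _h_ǎ_o_"
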
tokenizer_config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "add_blank": true,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "_",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "53": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "is_uroman": false,
+   "language": "cmn",
+   "model_max_length": 1000000000000000019884624838656,
+   "normalize": true,
+   "pad_token": "_",
+   "phonemize": false,
+   "tokenizer_class": "VitsTokenizer",
+   "unk_token": "<unk>"
+ }
vocab.json ADDED
@@ -0,0 +1,55 @@
+ {
+   " ": 1,
+   "_": 0,
+   "a": 2,
+   "b": 3,
+   "c": 4,
+   "d": 5,
+   "e": 6,
+   "f": 7,
+   "g": 8,
+   "h": 9,
+   "i": 10,
+   "j": 11,
+   "k": 12,
+   "l": 13,
+   "m": 14,
+   "n": 15,
+   "o": 16,
+   "p": 17,
+   "q": 18,
+   "r": 19,
+   "s": 20,
+   "t": 21,
+   "u": 22,
+   "w": 23,
+   "x": 24,
+   "y": 25,
+   "z": 26,
+   "à": 27,
+   "á": 28,
+   "è": 29,
+   "é": 30,
+   "ì": 31,
+   "í": 32,
+   "ò": 33,
+   "ó": 34,
+   "ù": 35,
+   "ú": 36,
+   "ü": 37,
+   "ā": 38,
+   "ē": 39,
+   "ě": 40,
+   "ī": 41,
+   "ń": 42,
+   "ō": 43,
+   "ū": 44,
+   "ǎ": 45,
+   "ǐ": 46,
+   "ǒ": 47,
+   "ǔ": 48,
+   "ǘ": 49,
+   "ǚ": 50,
+   "ǜ": 51,
+   "ḿ": 52
+ }