Xenova (HF staff) committed
Commit b412dc1
1 Parent(s): 0d627dc

Upload folder using huggingface_hub
config.json ADDED
@@ -0,0 +1,45 @@
+ {
+   "_name_or_path": "Helsinki-NLP/opus-mt-tc-big-tr-en",
+   "activation_dropout": 0.0,
+   "activation_function": "relu",
+   "architectures": [
+     "MarianMTModel"
+   ],
+   "attention_dropout": 0.0,
+   "bad_words_ids": [
+     [
+       57059
+     ]
+   ],
+   "bos_token_id": 0,
+   "classifier_dropout": 0.0,
+   "d_model": 1024,
+   "decoder_attention_heads": 16,
+   "decoder_ffn_dim": 4096,
+   "decoder_layerdrop": 0.0,
+   "decoder_layers": 6,
+   "decoder_start_token_id": 57059,
+   "decoder_vocab_size": 57060,
+   "dropout": 0.1,
+   "encoder_attention_heads": 16,
+   "encoder_ffn_dim": 4096,
+   "encoder_layerdrop": 0.0,
+   "encoder_layers": 6,
+   "eos_token_id": 43741,
+   "forced_eos_token_id": 43741,
+   "init_std": 0.02,
+   "is_encoder_decoder": true,
+   "max_length": 512,
+   "max_position_embeddings": 1024,
+   "model_type": "marian",
+   "normalize_embedding": false,
+   "num_beams": 4,
+   "num_hidden_layers": 6,
+   "pad_token_id": 57059,
+   "scale_embedding": true,
+   "share_encoder_decoder_embeddings": true,
+   "static_position_embeddings": true,
+   "transformers_version": "4.32.0.dev0",
+   "use_cache": true,
+   "vocab_size": 57060
+ }
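Note: this config.json is what Hugging Face Transformers parses into a MarianConfig when the model is loaded. A minimal sketch of how it is consumed, assuming the upstream Helsinki-NLP/opus-mt-tc-big-tr-en checkpoint (the PyTorch weights are not part of this commit):

    # Minimal sketch, assuming the upstream PyTorch checkpoint is reachable.
    from transformers import AutoConfig, MarianMTModel

    # config.json above becomes a MarianConfig object.
    config = AutoConfig.from_pretrained("Helsinki-NLP/opus-mt-tc-big-tr-en")
    assert config.model_type == "marian"
    assert config.d_model == 1024 and config.encoder_layers == 6

    # The "architectures" field tells the Auto* classes to build a MarianMTModel.
    model = MarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-tc-big-tr-en")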
generation_config.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "_from_model_config": true,
+   "bad_words_ids": [
+     [
+       57059
+     ]
+   ],
+   "bos_token_id": 0,
+   "decoder_start_token_id": 57059,
+   "eos_token_id": 43741,
+   "forced_eos_token_id": 43741,
+   "max_length": 512,
+   "num_beams": 4,
+   "pad_token_id": 57059,
+   "transformers_version": "4.32.0.dev0"
+ }
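These values are the defaults picked up at generate() time; any keyword passed to generate() overrides them. A short sketch of reading them back (same upstream checkpoint assumption as above):

    # Sketch: generation_config.json supplies the defaults used by generate().
    from transformers import GenerationConfig

    gen_config = GenerationConfig.from_pretrained("Helsinki-NLP/opus-mt-tc-big-tr-en")
    print(gen_config.num_beams)      # 4  -> beam search by default
    print(gen_config.max_length)     # 512
    print(gen_config.bad_words_ids)  # [[57059]] -> the pad token is never generated

    # Passing a value explicitly overrides the file's default, e.g.:
    # model.generate(**inputs, num_beams=1)  # greedy decoding instead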
onnx/decoder_model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fd8fca7108f0cfe2fd71769ed1b96d1dd52fc1e1d6f85ce72a9fbc8fc2b334d8
+ size 875253264
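The three lines above (and in the ONNX entries that follow) are a Git LFS pointer file, not the ONNX graph itself: the Hub stores large binaries out of band, and the pointer records only the content hash and byte size. An illustrative parsing sketch; parse_lfs_pointer is a hypothetical helper, since huggingface_hub and git lfs resolve pointers for you:

    # Sketch: parsing a Git LFS pointer file like the one above.
    def parse_lfs_pointer(text: str) -> dict:
        fields = {}
        for line in text.strip().splitlines():
            key, _, value = line.partition(" ")
            fields[key] = value
        return fields

    pointer = parse_lfs_pointer(
        "version https://git-lfs.github.com/spec/v1\n"
        "oid sha256:fd8fca7108f0cfe2fd71769ed1b96d1dd52fc1e1d6f85ce72a9fbc8fc2b334d8\n"
        "size 875253264\n"
    )
    print(int(pointer["size"]))  # 875253264 bytes (~875 MB) for decoder_model.onnx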
onnx/decoder_model_merged.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:efaa9fd6dfd5facddeb9c6e36df76c28dc982c2ebc20dd49e193aad41ac0c463
+ size 875487492
onnx/decoder_model_merged_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:76597856c34d278e624f8fcccfb7291807a132815b862d0cd9a84f4bb9879460
+ size 220760044
onnx/decoder_model_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:edb8d149e96e1d622ee98da1a994affc926b16fa4dc7684267a963a3053947e1
+ size 220390278
onnx/decoder_with_past_model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5ff5783cadc597de352690762265a1a92969627029f4ba41fb6489ef67186121
+ size 824829419
onnx/decoder_with_past_model_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1949f1e57d22ffe30058c8b7a3b4f2623d9d59c1e2066cfa4af50dd87b4b5a33
+ size 207626334
onnx/encoder_model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:057c70458c4d99b235951bf82e2a1d27ed4e8f89cffcd596e167c47ce51a624a
+ size 540330961
onnx/encoder_model_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8c3eb6bf194dde190e18c29e29a9f0eb38c24de7f01bc32ce23644c34a5a0683
+ size 135755056
quantize_config.json ADDED
@@ -0,0 +1,125 @@
+ {
+   "per_channel": true,
+   "reduce_range": true,
+   "per_model_config": {
+     "encoder_model": {
+       "op_types": [
+         "Transpose",
+         "Shape",
+         "Where",
+         "Div",
+         "ReduceMean",
+         "Relu",
+         "Softmax",
+         "Sqrt",
+         "ConstantOfShape",
+         "Mul",
+         "Cast",
+         "MatMul",
+         "Range",
+         "Concat",
+         "Reshape",
+         "Expand",
+         "Gather",
+         "Unsqueeze",
+         "Add",
+         "Constant",
+         "Sub",
+         "Pow",
+         "Equal"
+       ],
+       "weight_type": "QInt8"
+     },
+     "decoder_with_past_model": {
+       "op_types": [
+         "Transpose",
+         "Shape",
+         "Where",
+         "Div",
+         "ReduceMean",
+         "Relu",
+         "Softmax",
+         "Sqrt",
+         "ConstantOfShape",
+         "Mul",
+         "Cast",
+         "MatMul",
+         "Range",
+         "Concat",
+         "Reshape",
+         "Expand",
+         "Gather",
+         "Unsqueeze",
+         "Add",
+         "Constant",
+         "Sub",
+         "Pow",
+         "Equal"
+       ],
+       "weight_type": "QInt8"
+     },
+     "decoder_model": {
+       "op_types": [
+         "Transpose",
+         "Shape",
+         "Where",
+         "Div",
+         "ReduceMean",
+         "Relu",
+         "Softmax",
+         "Sqrt",
+         "ConstantOfShape",
+         "Mul",
+         "Cast",
+         "MatMul",
+         "Range",
+         "Concat",
+         "Reshape",
+         "Less",
+         "Slice",
+         "Expand",
+         "Gather",
+         "Unsqueeze",
+         "Squeeze",
+         "Add",
+         "Constant",
+         "Sub",
+         "Pow",
+         "Equal"
+       ],
+       "weight_type": "QInt8"
+     },
+     "decoder_model_merged": {
+       "op_types": [
+         "Transpose",
+         "Shape",
+         "Where",
+         "Div",
+         "ReduceMean",
+         "Relu",
+         "Softmax",
+         "Sqrt",
+         "ConstantOfShape",
+         "Mul",
+         "Cast",
+         "MatMul",
+         "Range",
+         "If",
+         "Concat",
+         "Reshape",
+         "Less",
+         "Slice",
+         "Expand",
+         "Gather",
+         "Unsqueeze",
+         "Squeeze",
+         "Add",
+         "Constant",
+         "Sub",
+         "Pow",
+         "Equal"
+       ],
+       "weight_type": "QInt8"
+     }
+   }
+ }
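The per_channel / reduce_range / QInt8 settings above match ONNX Runtime's dynamic quantization options, which is presumably how the *_quantized.onnx files were produced. A sketch of the equivalent call (paths illustrative; assumes onnxruntime is installed):

    # Sketch: dynamic weight quantization with the settings from quantize_config.json.
    from onnxruntime.quantization import QuantType, quantize_dynamic

    quantize_dynamic(
        model_input="onnx/encoder_model.onnx",
        model_output="onnx/encoder_model_quantized.onnx",
        per_channel=True,             # "per_channel": true
        reduce_range=True,            # "reduce_range": true
        weight_type=QuantType.QInt8,  # "weight_type": "QInt8"
    )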
source.spm ADDED
Binary file (833 kB)
special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "eos_token": "</s>",
+   "pad_token": "<pad>",
+   "unk_token": "<unk>"
+ }
target.spm ADDED
Binary file (797 kB)
tokenizer.json ADDED
The diff for this file is too large to render.
tokenizer_config.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "</s>",
+   "model_max_length": 512,
+   "pad_token": "<pad>",
+   "separate_vocabs": false,
+   "source_lang": "tr",
+   "sp_model_kwargs": {},
+   "target_lang": "en",
+   "tokenizer_class": "MarianTokenizer",
+   "unk_token": "<unk>"
+ }
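Together, source.spm, target.spm, vocab.json, special_tokens_map.json, and this tokenizer_config.json back a MarianTokenizer for Turkish-to-English translation. A sketch of a full round trip against the upstream checkpoint (the example sentence is illustrative):

    # Sketch: tr -> en translation with the tokenizer defined by the files above.
    from transformers import MarianMTModel, MarianTokenizer

    name = "Helsinki-NLP/opus-mt-tc-big-tr-en"  # upstream checkpoint
    tokenizer = MarianTokenizer.from_pretrained(name)
    model = MarianMTModel.from_pretrained(name)

    inputs = tokenizer("Merhaba, nasılsın?", return_tensors="pt")
    outputs = model.generate(**inputs)  # beam search per generation_config.json
    print(tokenizer.decode(outputs[0], skip_special_tokens=True))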
vocab.json ADDED
The diff for this file is too large to render.