vrdn23 committed
Commit 5545996
1 Parent(s): d5c9d4e

Upload model files to check if model loading works correctly


Uploading both the PyTorch and ONNX model files to verify that loading works as expected.
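A minimal sketch of that loading check (the repo id below is a placeholder; it assumes `transformers` plus `optimum[onnxruntime]` for the ONNX side):

```python
from transformers import AutoTokenizer, BartForConditionalGeneration
from optimum.onnxruntime import ORTModelForSeq2SeqLM

repo_id = "vrdn23/<this-repo>"  # placeholder: substitute the actual repo id

# PyTorch weights (config.json + pytorch_model.bin)
pt_model = BartForConditionalGeneration.from_pretrained(repo_id)

# ONNX export (encoder_model.onnx, decoder_model.onnx, decoder_with_past_model.onnx)
onnx_model = ORTModelForSeq2SeqLM.from_pretrained(repo_id)

# Smoke test: both backends should generate without errors.
# Input symbols are space-separated because the tokenizer is WordLevel
# with a Whitespace pre-tokenizer (see tokenizer.json below).
tokenizer = AutoTokenizer.from_pretrained(repo_id)
inputs = tokenizer("h e l l o", return_tensors="pt")
print(tokenizer.decode(pt_model.generate(**inputs)[0], skip_special_tokens=True))
print(tokenizer.decode(onnx_model.generate(**inputs)[0], skip_special_tokens=True))
```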

config.json ADDED
@@ -0,0 +1,47 @@
+ {
+ "_name_or_path": "outputs_config_10",
+ "activation_dropout": 0.1,
+ "activation_function": "gelu",
+ "add_cross_attention": true,
+ "architectures": [
+ "BartForConditionalGeneration"
+ ],
+ "attention_dropout": 0.1,
+ "bos_token_id": 0,
+ "classifier_dropout": 0.0,
+ "cross_attention_hidden_size": 256,
+ "d_model": 256,
+ "decoder_attention_heads": 4,
+ "decoder_ffn_dim": 512,
+ "decoder_layerdrop": 0.1,
+ "decoder_layers": 3,
+ "decoder_start_token_id": 2,
+ "dropout": 0.3,
+ "encoder_attention_heads": 4,
+ "encoder_ffn_dim": 512,
+ "encoder_layerdrop": 0.1,
+ "encoder_layers": 3,
+ "eos_token_id": 2,
+ "forced_eos_token_id": 2,
+ "id2label": {
+ "0": "LABEL_0",
+ "1": "LABEL_1",
+ "2": "LABEL_2"
+ },
+ "init_std": 0.02,
+ "is_encoder_decoder": true,
+ "label2id": {
+ "LABEL_0": 0,
+ "LABEL_1": 1,
+ "LABEL_2": 2
+ },
+ "max_length": 128,
+ "max_position_embeddings": 128,
+ "model_type": "bart",
+ "num_hidden_layers": 3,
+ "pad_token_id": 1,
+ "scale_embedding": true,
+ "transformers_version": "4.28.0.dev0",
+ "use_cache": true,
+ "vocab_size": 103
+ }
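The config above describes a small BART encoder-decoder: 3 encoder and 3 decoder layers, d_model 256, 4 attention heads, and a 103-symbol vocabulary. It can be instantiated on its own (randomly initialized) to confirm the file parses and the parameter count is plausible; a sketch:

```python
from transformers import BartConfig, BartForConditionalGeneration

config = BartConfig.from_json_file("config.json")
model = BartForConditionalGeneration(config)

# Roughly 4M parameters, i.e. about 16 MB in fp32, which is consistent
# with the 16,236,389-byte pytorch_model.bin uploaded below.
print(model.num_parameters())
```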
decoder_model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c0d21bc4970274d00b359f42e7de711a7034f771888a73064c28449fa905e53c
+ size 9974403
decoder_model_merged.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dfdbe66a7c5876074c6fe12d50fb5ddfd8134f7c424cc7bb544d0cb46a2e3c7b
+ size 10090299
decoder_with_past_model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a6ed785e49c85370617903b1d39391fc7621b4bc261c6d8d772e6cf037f8427c
+ size 8371595
encoder_model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8d9aae528c64a7d81d793a394b4b71ea22106cc01ff5efd13c3fd19bc6854927
+ size 6629277
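The four .onnx files above are stored as Git LFS pointers. Besides the optimum-based load, each graph can be opened directly with onnxruntime to verify it is a valid model; a sketch assuming the files have been pulled locally:

```python
import onnxruntime as ort

# File names as uploaded in this commit
for name in ("encoder_model.onnx", "decoder_model.onnx",
             "decoder_model_merged.onnx", "decoder_with_past_model.onnx"):
    # Creating the session parses and validates the graph
    sess = ort.InferenceSession(name, providers=["CPUExecutionProvider"])
    print(name, "->", [inp.name for inp in sess.get_inputs()])
```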
generation_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "bos_token_id": 0,
+ "decoder_start_token_id": 2,
+ "eos_token_id": 2,
+ "forced_eos_token_id": 2,
+ "max_length": 128,
+ "pad_token_id": 1,
+ "transformers_version": "4.28.0.dev0"
+ }
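These defaults are picked up automatically by generate() when the model is loaded from the repo; to inspect them explicitly (repo id again a placeholder):

```python
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("vrdn23/<this-repo>")
print(gen_config.max_length)              # 128
print(gen_config.decoder_start_token_id)  # 2
```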
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4f42a7f4218ae57d098725c515c7fa8b9ac5eb7a8d9e526297509ade127a8c93
+ size 16236389
special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+ {
+ "bos_token": "<s>",
+ "cls_token": "<s>",
+ "eos_token": "</s>",
+ "mask_token": {
+ "content": "<mask>",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "<pad>",
+ "sep_token": "</s>",
+ "unk_token": "<unk>"
+ }
tokenizer.json ADDED
@@ -0,0 +1,184 @@
+ {
+ "version": "1.0",
+ "truncation": {
+ "direction": "Right",
+ "max_length": 128,
+ "strategy": "LongestFirst",
+ "stride": 0
+ },
+ "padding": null,
+ "added_tokens": [
+ {
+ "id": 0,
+ "content": "<s>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 1,
+ "content": "<pad>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 2,
+ "content": "</s>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 3,
+ "content": "<unk>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 4,
+ "content": "<mask>",
+ "single_word": false,
+ "lstrip": true,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ }
+ ],
+ "normalizer": null,
+ "pre_tokenizer": {
+ "type": "Whitespace"
+ },
+ "post_processor": {
+ "type": "RobertaProcessing",
+ "sep": [
+ "</s>",
+ 2
+ ],
+ "cls": [
+ "<s>",
+ 0
+ ],
+ "trim_offsets": true,
+ "add_prefix_space": false
+ },
+ "decoder": null,
+ "model": {
+ "type": "WordLevel",
+ "vocab": {
+ "<s>": 0,
+ "<pad>": 1,
+ "</s>": 2,
+ "<unk>": 3,
+ "<mask>": 4,
+ "e": 5,
+ "a": 6,
+ "s": 7,
+ "i": 8,
+ "r": 9,
+ "n": 10,
+ "AH0": 11,
+ "o": 12,
+ "N": 13,
+ "t": 14,
+ "l": 15,
+ "S": 16,
+ "L": 17,
+ "T": 18,
+ "R": 19,
+ "K": 20,
+ "c": 21,
+ "d": 22,
+ "D": 23,
+ "u": 24,
+ "IH0": 25,
+ "m": 26,
+ "M": 27,
+ "Z": 28,
+ "h": 29,
+ "g": 30,
+ "p": 31,
+ "ER0": 32,
+ "IY0": 33,
+ "b": 34,
+ "B": 35,
+ "P": 36,
+ "EH1": 37,
+ "AE1": 38,
+ "AA1": 39,
+ "y": 40,
+ "k": 41,
+ "IH1": 42,
+ "F": 43,
+ "f": 44,
+ "G": 45,
+ "w": 46,
+ "V": 47,
+ "v": 48,
+ "NG": 49,
+ "'": 50,
+ "IY1": 51,
+ "EY1": 52,
+ "HH": 53,
+ "W": 54,
+ "SH": 55,
+ "OW1": 56,
+ "AO1": 57,
+ "OW0": 58,
+ "AH1": 59,
+ "UW1": 60,
+ "AY1": 61,
+ "JH": 62,
+ "z": 63,
+ "CH": 64,
+ "Y": 65,
+ "AA0": 66,
+ "ER1": 67,
+ "EH2": 68,
+ "IH2": 69,
+ "TH": 70,
+ "AY2": 71,
+ "AE2": 72,
+ "EY2": 73,
+ "AA2": 74,
+ "EH0": 75,
+ "j": 76,
+ "AW1": 77,
+ "OW2": 78,
+ "x": 79,
+ "IY2": 80,
+ "UW0": 81,
+ "AO2": 82,
+ "UH1": 83,
+ "AE0": 84,
+ "q": 85,
+ "AO0": 86,
+ "AH2": 87,
+ "UW2": 88,
+ "AY0": 89,
+ "OY1": 90,
+ "-": 91,
+ "EY0": 92,
+ "DH": 93,
+ "AW2": 94,
+ "ER2": 95,
+ "ZH": 96,
+ "UH2": 97,
+ "AW0": 98,
+ "UH0": 99,
+ "OY2": 100,
+ "OY0": 101,
+ ".": 102
+ },
+ "unk_token": "<unk>"
+ }
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,16 @@
+ {
+ "add_prefix_space": false,
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "<s>",
+ "eos_token": "</s>",
+ "errors": "replace",
+ "mask_token": "<mask>",
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "<pad>",
+ "sep_token": "</s>",
+ "special_tokens_map_file": null,
+ "tokenizer_class": "BartTokenizer",
+ "trim_offsets": true,
+ "unk_token": "<unk>"
+ }
vocab.json ADDED
@@ -0,0 +1 @@
+ {"<s>":0,"<pad>":1,"</s>":2,"<unk>":3,"<mask>":4,"e":5,"a":6,"s":7,"i":8,"r":9,"n":10,"AH0":11,"o":12,"N":13,"t":14,"l":15,"S":16,"L":17,"T":18,"R":19,"K":20,"c":21,"d":22,"D":23,"u":24,"IH0":25,"m":26,"M":27,"Z":28,"h":29,"g":30,"p":31,"ER0":32,"IY0":33,"b":34,"B":35,"P":36,"EH1":37,"AE1":38,"AA1":39,"y":40,"k":41,"IH1":42,"F":43,"f":44,"G":45,"w":46,"V":47,"v":48,"NG":49,"'":50,"IY1":51,"EY1":52,"HH":53,"W":54,"SH":55,"OW1":56,"AO1":57,"OW0":58,"AH1":59,"UW1":60,"AY1":61,"JH":62,"z":63,"CH":64,"Y":65,"AA0":66,"ER1":67,"EH2":68,"IH2":69,"TH":70,"AY2":71,"AE2":72,"EY2":73,"AA2":74,"EH0":75,"j":76,"AW1":77,"OW2":78,"x":79,"IY2":80,"UW0":81,"AO2":82,"UH1":83,"AE0":84,"q":85,"AO0":86,"AH2":87,"UW2":88,"AY0":89,"OY1":90,"-":91,"EY0":92,"DH":93,"AW2":94,"ER2":95,"ZH":96,"UH2":97,"AW0":98,"UH0":99,"OY2":100,"OY0":101,".":102}