Safetensors
llama
Pclanglais committed on
Commit 9fac57f
1 Parent(s): 16acec1

Upload folder using huggingface_hub

config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 2560,
+   "max_position_embeddings": 2048,
+   "mlp_bias": false,
+   "model_type": "llama",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 26,
+   "num_key_value_heads": 8,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "rope_theta": 10000,
+   "tie_word_embeddings": true,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.44.2",
+   "use_cache": true,
+   "vocab_size": 65536
+ }
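For reference, the parameter count implied by this configuration can be worked out directly from the fields above. The sketch below is a rough estimate assuming the standard transformers Llama layout (tied input/output embeddings, grouped-query attention with 8 KV heads, gated SiLU MLP); it is not part of the uploaded files.

```python
# Rough parameter count implied by config.json (assumes the standard
# transformers Llama layout; not taken from the repository itself).
hidden = 1024      # hidden_size
inter = 2560       # intermediate_size
layers = 26        # num_hidden_layers
heads = 16         # num_attention_heads
kv_heads = 8       # num_key_value_heads
vocab = 65536      # vocab_size

head_dim = hidden // heads
kv_dim = kv_heads * head_dim

embeddings = vocab * hidden                        # tied with lm_head, counted once
attn = 2 * hidden * hidden + 2 * hidden * kv_dim   # q/o projections + k/v projections
mlp = 3 * hidden * inter                           # gate, up, down projections
norms = 2 * hidden                                 # two RMSNorm weights per layer

total = embeddings + layers * (attn + mlp + norms) + hidden  # + final norm
print(f"{total:,} parameters")  # ~353M, i.e. ~707 MB in bfloat16
```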
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "transformers_version": "4.44.2"
+ }
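generation_config.json only pins the BOS/EOS ids (1 and 2) already declared in config.json, so generate() stops on token 2 by default. A minimal sketch, assuming the uploaded files sit in a local directory; "path/to/model" is a placeholder, since the repository id is not stated in this commit:

```python
# Minimal generation sketch; the path is a placeholder, not from this commit.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/model")
model = AutoModelForCausalLM.from_pretrained("path/to/model", torch_dtype=torch.bfloat16)

inputs = tokenizer("Example prompt", return_tensors="pt")
# eos_token_id=2 comes from generation_config.json and is picked up automatically.
output = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```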
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9ccd5f97a42a2f14577ef345edbfca238c0064f9206d1d2b232cdc24b834d6b1
+ size 706875632
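The LFS pointer records a 706,875,632-byte checkpoint, which is consistent with ~353M bfloat16 parameters plus a small JSON header. As a sanity check, one can open the file with the safetensors library and sum the tensor sizes; the sketch assumes the actual weights (not just the pointer file) have been pulled from LFS locally:

```python
# Sanity check: tensor bytes in model.safetensors vs. the LFS pointer size.
from safetensors import safe_open

total_params = 0
with safe_open("model.safetensors", framework="pt") as f:
    for name in f.keys():
        total_params += f.get_tensor(name).numel()

print(f"{total_params:,} parameters")           # expected ~353M for this config
print(f"{total_params * 2:,} bytes in bf16")    # close to 706,875,632 (header adds a few KB)
```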
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "additional_special_tokens": [
+     "<|tool_call_start|>",
+     "<|tool_call_end|>",
+     "<|tool_list_start|>",
+     "<|tool_list_end|>",
+     "<|source_analysis_start|>",
+     "<|source_analysis_end|>",
+     "<|source_start|>",
+     "<|source_end|>",
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|answer_start|>",
+     "<|answer_end|>",
+     "<|text_start|>",
+     "<|text_end|>",
+     "<|translation_start|>",
+     "<|translation_end|>",
+     "<|back_translation_start|>",
+     "<|back_translation_end|>",
+     "<|ocr_correction_start|>",
+     "<|ocr_correction_end|>",
+     "<|json_scheme_start|>",
+     "<|json_scheme_end|>",
+     "<|source_interpretation_start|>",
+     "<|source_interpretation_end|>",
+     "<|query_start|>",
+     "<|query_end|>"
+   ]
+ }
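special_tokens_map.json registers 26 task markers (tool calls, sources, queries, answers, translation, OCR correction, and so on) as additional special tokens. How these markers are meant to be combined into prompts is not documented in this commit; the sketch below only shows that they load as atomic tokens and can be stripped at decode time (placeholder path again):

```python
# Check that the task markers behave as single, atomic special tokens.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/model")

ids = tokenizer.encode("<|source_start|>", add_special_tokens=False)
print(ids)  # expected: a single token id, since the marker is a special token

text = "<|query_start|>Who wrote this source?<|query_end|>"
encoded = tokenizer(text)["input_ids"]
print(tokenizer.decode(encoded, skip_special_tokens=True))   # markers removed
print(tokenizer.decode(encoded, skip_special_tokens=False))  # markers kept
```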
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,275 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<|begin_of_text|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "<|end_of_text|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "65510": {
+       "content": "<|tool_call_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "65511": {
+       "content": "<|tool_call_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "65512": {
+       "content": "<|tool_list_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "65513": {
+       "content": "<|tool_list_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "65514": {
+       "content": "<|source_analysis_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "65515": {
+       "content": "<|source_analysis_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "65516": {
+       "content": "<|source_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "65517": {
+       "content": "<|source_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "65518": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "65519": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "65520": {
+       "content": "<|answer_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "65521": {
+       "content": "<|answer_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "65522": {
+       "content": "<|text_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "65523": {
+       "content": "<|text_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "65524": {
+       "content": "<|translation_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "65525": {
+       "content": "<|translation_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "65526": {
+       "content": "<|back_translation_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "65527": {
+       "content": "<|back_translation_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "65528": {
+       "content": "<|ocr_correction_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "65529": {
+       "content": "<|ocr_correction_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "65530": {
+       "content": "<|json_scheme_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "65531": {
+       "content": "<|json_scheme_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "65532": {
+       "content": "<|source_interpretation_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "65533": {
+       "content": "<|source_interpretation_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "65534": {
+       "content": "<|query_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "65535": {
+       "content": "<|query_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<|tool_call_start|>",
+     "<|tool_call_end|>",
+     "<|tool_list_start|>",
+     "<|tool_list_end|>",
+     "<|source_analysis_start|>",
+     "<|source_analysis_end|>",
+     "<|source_start|>",
+     "<|source_end|>",
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|answer_start|>",
+     "<|answer_end|>",
+     "<|text_start|>",
+     "<|text_end|>",
+     "<|translation_start|>",
+     "<|translation_end|>",
+     "<|back_translation_start|>",
+     "<|back_translation_end|>",
+     "<|ocr_correction_start|>",
+     "<|ocr_correction_end|>",
+     "<|json_scheme_start|>",
+     "<|json_scheme_end|>",
+     "<|source_interpretation_start|>",
+     "<|source_interpretation_end|>",
+     "<|query_start|>",
+     "<|query_end|>"
+   ],
+   "clean_up_tokenization_spaces": true,
+   "model_max_length": 1000000000000000019884624838656,
+   "tokenizer_class": "PreTrainedTokenizerFast"
+ }
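tokenizer_config.json pins [UNK], <|begin_of_text|>, <|end_of_text|>, and [PAD] to ids 0 through 3 and places the 26 task markers at the very top of the 65,536-token vocabulary (65510 through 65535). model_max_length is the transformers sentinel for "no limit set", so the practical context limit is the max_position_embeddings of 2048 from config.json. A quick check of the id layout, again with a placeholder path:

```python
# Verify the id layout declared in added_tokens_decoder.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/model")

assert tokenizer.convert_tokens_to_ids("<|begin_of_text|>") == 1
assert tokenizer.convert_tokens_to_ids("<|end_of_text|>") == 2
assert tokenizer.convert_tokens_to_ids("<|tool_call_start|>") == 65510
assert tokenizer.convert_tokens_to_ids("<|query_end|>") == 65535
print(len(tokenizer))  # vocabulary size including the added special tokens
```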