DewEfresh committed on
Commit
9c413d1
1 Parent(s): 3489309

Upload folder using huggingface_hub

Browse files
README.md ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ base_model:
3
+ - m-a-p/neo_7b
4
+ - DewEfresh/neo_7b
5
+ tags:
6
+ - merge
7
+ - mergekit
8
+ - lazymergekit
9
+ - m-a-p/neo_7b
10
+ - DewEfresh/neo_7b
11
+ ---
12
+
13
+ # Neo_7b-merge5
14
+
15
+ Neo_7b-merge5 is a merge of the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing):
16
+ * [m-a-p/neo_7b](https://huggingface.co/m-a-p/neo_7b)
17
+ * [DewEfresh/neo_7b](https://huggingface.co/DewEfresh/neo_7b)
18
+
19
+ ## 🧩 Configuration
20
+
21
+ ```yaml
22
+ slices:
23
+ # Group 1 — NOTE(review): mergekit layer_range is half-open, so [0, 0] selects zero layers; likely intended [0, 1] (the uploaded config.json shows num_hidden_layers: 0 — confirm)
24
+ - sources:
25
+ - model: m-a-p/neo_7b
26
+ layer_range: [0, 0]
27
+ - model: DewEfresh/neo_7b
28
+ layer_range: [0, 0]
29
+ - sources:
30
+ - model: m-a-p/neo_7b
31
+ layer_range: [1, 1]
32
+ - model: DewEfresh/neo_7b
33
+ layer_range: [1, 1]
34
+ - sources:
35
+ - model: m-a-p/neo_7b
36
+ layer_range: [2, 2]
37
+ - model: DewEfresh/neo_7b
38
+ layer_range: [2, 2]
39
+ - sources:
40
+ - model: m-a-p/neo_7b
41
+ layer_range: [3, 3]
42
+ - model: DewEfresh/neo_7b
43
+ layer_range: [3, 3]
44
+ # Group 2
45
+ - sources:
46
+ - model: m-a-p/neo_7b
47
+ layer_range: [4, 4]
48
+ - model: DewEfresh/neo_7b
49
+ layer_range: [4, 4]
50
+ - sources:
51
+ - model: m-a-p/neo_7b
52
+ layer_range: [5, 5]
53
+ - model: DewEfresh/neo_7b
54
+ layer_range: [5, 5]
55
+ - sources:
56
+ - model: m-a-p/neo_7b
57
+ layer_range: [6, 6]
58
+ - model: DewEfresh/neo_7b
59
+ layer_range: [6, 6]
60
+ - sources:
61
+ - model: m-a-p/neo_7b
62
+ layer_range: [7, 7]
63
+ - model: DewEfresh/neo_7b
64
+ layer_range: [7, 7]
65
+ # Group 3
66
+ - sources:
67
+ - model: m-a-p/neo_7b
68
+ layer_range: [8, 8]
69
+ - model: DewEfresh/neo_7b
70
+ layer_range: [8, 8]
71
+ - sources:
72
+ - model: m-a-p/neo_7b
73
+ layer_range: [9, 9]
74
+ - model: DewEfresh/neo_7b
75
+ layer_range: [9, 9]
76
+ - sources:
77
+ - model: m-a-p/neo_7b
78
+ layer_range: [10, 10]
79
+ - model: DewEfresh/neo_7b
80
+ layer_range: [10, 10]
81
+ - sources:
82
+ - model: m-a-p/neo_7b
83
+ layer_range: [11, 11]
84
+ - model: DewEfresh/neo_7b
85
+ layer_range: [11, 11]
86
+ # Group 4
87
+ - sources:
88
+ - model: m-a-p/neo_7b
89
+ layer_range: [12, 12]
90
+ - model: DewEfresh/neo_7b
91
+ layer_range: [12, 12]
92
+ - sources:
93
+ - model: m-a-p/neo_7b
94
+ layer_range: [13, 13]
95
+ - model: DewEfresh/neo_7b
96
+ layer_range: [13, 13]
97
+ - sources:
98
+ - model: m-a-p/neo_7b
99
+ layer_range: [14, 14]
100
+ - model: DewEfresh/neo_7b
101
+ layer_range: [14, 14]
102
+ - sources:
103
+ - model: m-a-p/neo_7b
104
+ layer_range: [15, 15]
105
+ - model: DewEfresh/neo_7b
106
+ layer_range: [15, 15]
107
+ # Group 5
108
+ - sources:
109
+ - model: m-a-p/neo_7b
110
+ layer_range: [16, 16]
111
+ - model: DewEfresh/neo_7b
112
+ layer_range: [16, 16]
113
+ - sources:
114
+ - model: m-a-p/neo_7b
115
+ layer_range: [17, 17]
116
+ - model: DewEfresh/neo_7b
117
+ layer_range: [17, 17]
118
+ - sources:
119
+ - model: m-a-p/neo_7b
120
+ layer_range: [18, 18]
121
+ - model: DewEfresh/neo_7b
122
+ layer_range: [18, 18]
123
+ - sources:
124
+ - model: m-a-p/neo_7b
125
+ layer_range: [19, 19]
126
+ - model: DewEfresh/neo_7b
127
+ layer_range: [19, 19]
128
+ # Group 6
129
+ - sources:
130
+ - model: m-a-p/neo_7b
131
+ layer_range: [20, 20]
132
+ - model: DewEfresh/neo_7b
133
+ layer_range: [20, 20]
134
+ - sources:
135
+ - model: m-a-p/neo_7b
136
+ layer_range: [21, 21]
137
+ - model: DewEfresh/neo_7b
138
+ layer_range: [21, 21]
139
+ - sources:
140
+ - model: m-a-p/neo_7b
141
+ layer_range: [22, 22]
142
+ - model: DewEfresh/neo_7b
143
+ layer_range: [22, 22]
144
+ - sources:
145
+ - model: m-a-p/neo_7b
146
+ layer_range: [23, 23]
147
+ - model: DewEfresh/neo_7b
148
+ layer_range: [23, 23]
149
+ # Group 7 (last group)
150
+ - sources:
151
+ - model: m-a-p/neo_7b
152
+ layer_range: [24, 24]
153
+ - model: DewEfresh/neo_7b
154
+ layer_range: [24, 24]
155
+ - sources:
156
+ - model: m-a-p/neo_7b
157
+ layer_range: [25, 25]
158
+ - model: DewEfresh/neo_7b
159
+ layer_range: [25, 25]
160
+ - sources:
161
+ - model: m-a-p/neo_7b
162
+ layer_range: [26, 26]
163
+ - model: DewEfresh/neo_7b
164
+ layer_range: [26, 26]
165
+ - sources:
166
+ - model: m-a-p/neo_7b
167
+ layer_range: [27, 27]
168
+ - model: DewEfresh/neo_7b
169
+ layer_range: [27, 27]
170
+ merge_method: slerp
171
+ base_model: m-a-p/neo_7b
172
+ parameters:
173
+ t: 0.5 # Equal weight to both models
174
+ dtype: bfloat16
175
+ output_path: ./merged_map_dewefresh_neo_7b
176
+ ```
177
+
178
+ ## 💻 Usage
179
+
180
+ ```python
181
+ !pip install -qU transformers accelerate
182
+
183
+ from transformers import AutoTokenizer
184
+ import transformers
185
+ import torch
186
+
187
+ model = "DewEfresh/Neo_7b-merge5"
188
+ messages = [{"role": "user", "content": "What is a large language model?"}]
189
+
190
+ tokenizer = AutoTokenizer.from_pretrained(model)
191
+ prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
192
+ pipeline = transformers.pipeline(
193
+ "text-generation",
194
+ model=model,
195
+ torch_dtype=torch.float16,
196
+ device_map="auto",
197
+ )
198
+
199
+ outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
200
+ print(outputs[0]["generated_text"])
201
+ ```
added_tokens.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "<|CLS|>": 64000,
3
+ "<|EOD|>": 64002,
4
+ "<|MASK|>": 64003,
5
+ "<|PAD|>": 64004,
6
+ "<|SEP|>": 64001
7
+ }
config.json ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "m-a-p/neo_7b",
3
+ "architectures": [
4
+ "LlamaForCausalLM"
5
+ ],
6
+ "attention_bias": false,
7
+ "attention_dropout": 0.0,
8
+ "bos_token_id": 1,
9
+ "eos_token_id": 2,
10
+ "hidden_act": "silu",
11
+ "hidden_size": 3072,
12
+ "initializer_range": 0.02,
13
+ "intermediate_size": 24576,
14
+ "max_position_embeddings": 8192,
15
+ "mlp_bias": false,
16
+ "model_type": "llama",
17
+ "num_attention_heads": 16,
18
+ "num_hidden_layers": 0,
19
+ "num_key_value_heads": 16,
20
+ "pretraining_tp": 1,
21
+ "rms_norm_eps": 1e-05,
22
+ "rope_scaling": null,
23
+ "rope_theta": 10000.0,
24
+ "tie_word_embeddings": false,
25
+ "torch_dtype": "bfloat16",
26
+ "transformers_version": "4.42.3",
27
+ "use_cache": true,
28
+ "vocab_size": 64256
29
+ }
mergekit_config.yml ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ slices:
3
+ # Group 1 — NOTE(review): mergekit layer_range is half-open, so [n, n] selects zero layers; each range here is empty (likely intended [n, n+1]) — confirm against the resulting num_hidden_layers
4
+ - sources:
5
+ - model: m-a-p/neo_7b
6
+ layer_range: [0, 0]
7
+ - model: DewEfresh/neo_7b
8
+ layer_range: [0, 0]
9
+ - sources:
10
+ - model: m-a-p/neo_7b
11
+ layer_range: [1, 1]
12
+ - model: DewEfresh/neo_7b
13
+ layer_range: [1, 1]
14
+ - sources:
15
+ - model: m-a-p/neo_7b
16
+ layer_range: [2, 2]
17
+ - model: DewEfresh/neo_7b
18
+ layer_range: [2, 2]
19
+ - sources:
20
+ - model: m-a-p/neo_7b
21
+ layer_range: [3, 3]
22
+ - model: DewEfresh/neo_7b
23
+ layer_range: [3, 3]
24
+ # Group 2
25
+ - sources:
26
+ - model: m-a-p/neo_7b
27
+ layer_range: [4, 4]
28
+ - model: DewEfresh/neo_7b
29
+ layer_range: [4, 4]
30
+ - sources:
31
+ - model: m-a-p/neo_7b
32
+ layer_range: [5, 5]
33
+ - model: DewEfresh/neo_7b
34
+ layer_range: [5, 5]
35
+ - sources:
36
+ - model: m-a-p/neo_7b
37
+ layer_range: [6, 6]
38
+ - model: DewEfresh/neo_7b
39
+ layer_range: [6, 6]
40
+ - sources:
41
+ - model: m-a-p/neo_7b
42
+ layer_range: [7, 7]
43
+ - model: DewEfresh/neo_7b
44
+ layer_range: [7, 7]
45
+ # Group 3
46
+ - sources:
47
+ - model: m-a-p/neo_7b
48
+ layer_range: [8, 8]
49
+ - model: DewEfresh/neo_7b
50
+ layer_range: [8, 8]
51
+ - sources:
52
+ - model: m-a-p/neo_7b
53
+ layer_range: [9, 9]
54
+ - model: DewEfresh/neo_7b
55
+ layer_range: [9, 9]
56
+ - sources:
57
+ - model: m-a-p/neo_7b
58
+ layer_range: [10, 10]
59
+ - model: DewEfresh/neo_7b
60
+ layer_range: [10, 10]
61
+ - sources:
62
+ - model: m-a-p/neo_7b
63
+ layer_range: [11, 11]
64
+ - model: DewEfresh/neo_7b
65
+ layer_range: [11, 11]
66
+ # Group 4
67
+ - sources:
68
+ - model: m-a-p/neo_7b
69
+ layer_range: [12, 12]
70
+ - model: DewEfresh/neo_7b
71
+ layer_range: [12, 12]
72
+ - sources:
73
+ - model: m-a-p/neo_7b
74
+ layer_range: [13, 13]
75
+ - model: DewEfresh/neo_7b
76
+ layer_range: [13, 13]
77
+ - sources:
78
+ - model: m-a-p/neo_7b
79
+ layer_range: [14, 14]
80
+ - model: DewEfresh/neo_7b
81
+ layer_range: [14, 14]
82
+ - sources:
83
+ - model: m-a-p/neo_7b
84
+ layer_range: [15, 15]
85
+ - model: DewEfresh/neo_7b
86
+ layer_range: [15, 15]
87
+ # Group 5
88
+ - sources:
89
+ - model: m-a-p/neo_7b
90
+ layer_range: [16, 16]
91
+ - model: DewEfresh/neo_7b
92
+ layer_range: [16, 16]
93
+ - sources:
94
+ - model: m-a-p/neo_7b
95
+ layer_range: [17, 17]
96
+ - model: DewEfresh/neo_7b
97
+ layer_range: [17, 17]
98
+ - sources:
99
+ - model: m-a-p/neo_7b
100
+ layer_range: [18, 18]
101
+ - model: DewEfresh/neo_7b
102
+ layer_range: [18, 18]
103
+ - sources:
104
+ - model: m-a-p/neo_7b
105
+ layer_range: [19, 19]
106
+ - model: DewEfresh/neo_7b
107
+ layer_range: [19, 19]
108
+ # Group 6
109
+ - sources:
110
+ - model: m-a-p/neo_7b
111
+ layer_range: [20, 20]
112
+ - model: DewEfresh/neo_7b
113
+ layer_range: [20, 20]
114
+ - sources:
115
+ - model: m-a-p/neo_7b
116
+ layer_range: [21, 21]
117
+ - model: DewEfresh/neo_7b
118
+ layer_range: [21, 21]
119
+ - sources:
120
+ - model: m-a-p/neo_7b
121
+ layer_range: [22, 22]
122
+ - model: DewEfresh/neo_7b
123
+ layer_range: [22, 22]
124
+ - sources:
125
+ - model: m-a-p/neo_7b
126
+ layer_range: [23, 23]
127
+ - model: DewEfresh/neo_7b
128
+ layer_range: [23, 23]
129
+ # Group 7 (last group)
130
+ - sources:
131
+ - model: m-a-p/neo_7b
132
+ layer_range: [24, 24]
133
+ - model: DewEfresh/neo_7b
134
+ layer_range: [24, 24]
135
+ - sources:
136
+ - model: m-a-p/neo_7b
137
+ layer_range: [25, 25]
138
+ - model: DewEfresh/neo_7b
139
+ layer_range: [25, 25]
140
+ - sources:
141
+ - model: m-a-p/neo_7b
142
+ layer_range: [26, 26]
143
+ - model: DewEfresh/neo_7b
144
+ layer_range: [26, 26]
145
+ - sources:
146
+ - model: m-a-p/neo_7b
147
+ layer_range: [27, 27]
148
+ - model: DewEfresh/neo_7b
149
+ layer_range: [27, 27]
150
+ merge_method: slerp
151
+ base_model: m-a-p/neo_7b
152
+ parameters:
153
+ t: 0.5 # Equal weight to both models
154
+ dtype: bfloat16
155
+ output_path: ./merged_map_dewefresh_neo_7b
model-00001-of-00001.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:41209159383a6438fa1e73146df470aae582c40bf510ec2495a3f8780477ce87
3
+ size 789584192
model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:93f2a7f3db85521671732789bb8fd1bb3ae3a7e0d33170ea530f53a25346fcdd
3
+ size 4998668592
model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1227a57ac0fc74f5a2c223e5e41dd53ba09f50acbf6d63e5f166a8e1fc740c5e
3
+ size 4926336584
model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5abc28b3d1b89d4077e831f565ea2b0794a20ade31c750e450a00c7ebc9327ea
3
+ size 4907455792
model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:028abab236ed774f37153fc8d7c0dc2f87b34b3dc964d20ffab314a43f3d6706
3
+ size 226505520
model.safetensors.index.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"metadata": {"mergekit_version": "0.0.4.4", "total_size": 789583872}, "weight_map": {"lm_head.weight": "model-00001-of-00001.safetensors", "model.embed_tokens.weight": "model-00001-of-00001.safetensors", "model.norm.weight": "model-00001-of-00001.safetensors"}}
special_tokens_map.json ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|CLS|>",
4
+ "<|SEP|>",
5
+ "<|EOD|>",
6
+ "<|MASK|>",
7
+ "<|PAD|>"
8
+ ],
9
+ "bos_token": {
10
+ "content": "<s>",
11
+ "lstrip": false,
12
+ "normalized": true,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "eos_token": {
17
+ "content": "</s>",
18
+ "lstrip": false,
19
+ "normalized": true,
20
+ "rstrip": false,
21
+ "single_word": true
22
+ },
23
+ "pad_token": {
24
+ "content": "<unk>",
25
+ "lstrip": false,
26
+ "normalized": true,
27
+ "rstrip": false,
28
+ "single_word": true
29
+ },
30
+ "unk_token": {
31
+ "content": "<unk>",
32
+ "lstrip": false,
33
+ "normalized": true,
34
+ "rstrip": false,
35
+ "single_word": true
36
+ }
37
+ }
tokenizer.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f6a2447b0e5664cabb2481587597102d82f42f0ccb7ef22e1c2d95494a8b03c5
3
+ size 1002561
tokenizer_config.json ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_bos_token": false,
3
+ "add_eos_token": false,
4
+ "added_tokens_decoder": {
5
+ "0": {
6
+ "content": "<unk>",
7
+ "lstrip": false,
8
+ "normalized": true,
9
+ "rstrip": false,
10
+ "single_word": true,
11
+ "special": true
12
+ },
13
+ "1": {
14
+ "content": "<s>",
15
+ "lstrip": false,
16
+ "normalized": true,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "2": {
22
+ "content": "</s>",
23
+ "lstrip": false,
24
+ "normalized": true,
25
+ "rstrip": false,
26
+ "single_word": true,
27
+ "special": true
28
+ },
29
+ "64000": {
30
+ "content": "<|CLS|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "64001": {
38
+ "content": "<|SEP|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "64002": {
46
+ "content": "<|EOD|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "64003": {
54
+ "content": "<|MASK|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "64004": {
62
+ "content": "<|PAD|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ }
69
+ },
70
+ "additional_special_tokens": [
71
+ "<|CLS|>",
72
+ "<|SEP|>",
73
+ "<|EOD|>",
74
+ "<|MASK|>",
75
+ "<|PAD|>"
76
+ ],
77
+ "auto_map": {
78
+ "AutoTokenizer": [
79
+ "m-a-p/neo_7b--tokenization_neo.NEOTokenizer",
80
+ null
81
+ ]
82
+ },
83
+ "bos_token": "<s>",
84
+ "chat_template": "{% set system_message = 'You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\\n\\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don\\'t know the answer to a question, please don\\'t share false information.' %}{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if loop.index0 == 0 and system_message is defined %}{% set content = '<<SYS>>\\n' + system_message + '\\n<</SYS>>\\n\\n' + message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ '<s>' + '[INST] ' + content + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ content + '</s>' }}{% endif %}{% endfor %}",
85
+ "clean_up_tokenization_spaces": false,
86
+ "eos_token": "</s>",
87
+ "model_max_length": 4096,
88
+ "pad_token": "<unk>",
89
+ "padding_side": "right",
90
+ "sp_model_kwargs": {},
91
+ "split_special_tokens": false,
92
+ "tokenizer_class": "NEOTokenizer",
93
+ "unk_token": "<unk>",
94
+ "use_fast": false
95
+ }