Siddartha10 committed
Commit 70c9f08
1 Parent(s): 0e78189

Siddartha10/notworkingmodel

config.json CHANGED
@@ -5,8 +5,8 @@
   ],
   "attention_bias": false,
   "attention_dropout": 0.0,
-  "bos_token_id": 0,
-  "eos_token_id": 0,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
   "hidden_act": "silu",
   "hidden_size": 576,
   "initializer_range": 0.02,
@@ -17,6 +17,7 @@
   "num_attention_heads": 9,
   "num_hidden_layers": 30,
   "num_key_value_heads": 3,
+  "pad_token_id": 2,
   "pretraining_tp": 1,
   "rms_norm_eps": 1e-05,
   "rope_scaling": null,
generation_config.json CHANGED
@@ -1,6 +1,7 @@
 {
   "_from_model_config": true,
-  "bos_token_id": 0,
-  "eos_token_id": 0,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "pad_token_id": 2,
   "transformers_version": "4.44.2"
 }
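The two diffs above retarget the special-token ids from <|endoftext|> (id 0) to the ChatML tokens, with bos_token_id 1 and eos/pad_token_id 2, kept consistent between config.json and generation_config.json. A minimal sketch to confirm the ids line up after pulling this commit; the repo id is taken from the commit header and is an assumption, swap in a local path if needed:

from transformers import AutoConfig, AutoTokenizer, GenerationConfig

repo = "Siddartha10/notworkingmodel"  # assumed repo id from the commit header

cfg = AutoConfig.from_pretrained(repo)
gen = GenerationConfig.from_pretrained(repo)
tok = AutoTokenizer.from_pretrained(repo)

# config.json and generation_config.json should now agree on the ids...
assert cfg.bos_token_id == gen.bos_token_id == 1
assert cfg.eos_token_id == gen.eos_token_id == 2
assert cfg.pad_token_id == gen.pad_token_id == 2

# ...and both should match the tokenizer's ChatML markers.
assert tok.convert_tokens_to_ids("<|im_start|>") == cfg.bos_token_id
assert tok.convert_tokens_to_ids("<|im_end|>") == cfg.eos_token_id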
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:82cfe952901ca429a921f415bd6a31cc2c78f1b7915f1bcdb8c92d8b6b33b368
+oid sha256:118dc0a2aaf575a866305616b557ecbfec9c6ed2d30d8e921a7fe5e94eeb279d
 size 538090408
special_tokens_map.json CHANGED
@@ -1,38 +1,23 @@
 {
   "additional_special_tokens": [
-    "<|endoftext|>",
-    "<|im_start|>",
-    "<|im_end|>",
-    "<repo_name>",
-    "<reponame>",
-    "<file_sep>",
-    "<filename>",
-    "<gh_stars>",
-    "<issue_start>",
-    "<issue_comment>",
-    "<issue_closed>",
-    "<jupyter_start>",
-    "<jupyter_text>",
-    "<jupyter_code>",
-    "<jupyter_output>",
-    "<jupyter_script>",
-    "<empty_output>"
+    {
+      "content": "<|im_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<|im_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    }
   ],
-  "bos_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "eos_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "pad_token": "<|endoftext|>",
+  "bos_token": "<|im_start|>",
+  "eos_token": "<|im_end|>",
+  "pad_token": "<|im_end|>",
   "unk_token": {
     "content": "<|endoftext|>",
     "lstrip": false,
tokenizer.json CHANGED
@@ -1,11 +1,6 @@
 {
   "version": "1.0",
-  "truncation": {
-    "direction": "Right",
-    "max_length": 1024,
-    "strategy": "LongestFirst",
-    "stride": 0
-  },
+  "truncation": null,
   "padding": null,
   "added_tokens": [
     {
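Nulling out the truncation block means the fast tokenizer no longer silently cuts inputs at 1024 tokens. If the old behaviour is wanted, it can be re-enabled on a loaded tokenizers object; a sketch against a local copy of the file changed above:

from tokenizers import Tokenizer

tok = Tokenizer.from_file("tokenizer.json")  # local copy of the file above
print(tok.truncation)  # None after this commit

# Restore the previous settings (Right / 1024 / LongestFirst / stride 0) if needed:
tok.enable_truncation(max_length=1024, stride=0,
                      strategy="longest_first", direction="right")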
tokenizer_config.json CHANGED
@@ -139,29 +139,15 @@
     }
   },
   "additional_special_tokens": [
-    "<|endoftext|>",
     "<|im_start|>",
-    "<|im_end|>",
-    "<repo_name>",
-    "<reponame>",
-    "<file_sep>",
-    "<filename>",
-    "<gh_stars>",
-    "<issue_start>",
-    "<issue_comment>",
-    "<issue_closed>",
-    "<jupyter_start>",
-    "<jupyter_text>",
-    "<jupyter_code>",
-    "<jupyter_output>",
-    "<jupyter_script>",
-    "<empty_output>"
+    "<|im_end|>"
   ],
-  "bos_token": "<|endoftext|>",
+  "bos_token": "<|im_start|>",
+  "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
   "clean_up_tokenization_spaces": false,
-  "eos_token": "<|endoftext|>",
+  "eos_token": "<|im_end|>",
   "model_max_length": 1000000000000000019884624838656,
-  "pad_token": "<|endoftext|>",
+  "pad_token": "<|im_end|>",
   "tokenizer_class": "GPT2Tokenizer",
   "unk_token": "<|endoftext|>",
   "vocab_size": 49152