IlyaGusev committed
Commit 91d7b8a
1 Parent(s): ae61b4f

Tagengo version

config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "../../llama-3-8b",
+  "_name_or_path": "models/llama-3-8b",
   "architectures": [
     "LlamaForCausalLM"
   ],
@@ -22,7 +22,7 @@
   "rope_theta": 500000.0,
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.39.3",
+  "transformers_version": "4.40.0",
   "use_cache": true,
   "vocab_size": 128256
 }
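
Not part of the commit: a minimal sketch of how this config.json is consumed by transformers. The local path is an assumption; any checkout of the repository files behaves the same way.

from transformers import AutoConfig

# Local path is hypothetical; point it at a checkout of this repository.
config = AutoConfig.from_pretrained("./saiga_llama3_8b")
print(config.architectures)         # ['LlamaForCausalLM']
print(config.vocab_size)            # 128256
print(config.rope_theta)            # 500000.0
print(config.torch_dtype)           # torch.bfloat16
print(config.transformers_version)  # '4.40.0' after this commit
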
generation_config.json CHANGED
@@ -1,8 +1,8 @@
 {
   "_from_model_config": true,
   "bos_token_id": 128000,
-  "eos_token_id": 128001,
-  "pad_token_id": 128002,
+  "eos_token_id": 128009,
+  "pad_token_id": 128000,
   "transformers_version": "4.34.0",
   "temperature": 0.2,
   "top_p": 0.9,
model-00001-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:98ce023d28e5ed2b3f7a290ebb91d7d9a8aafc02ef81fe3b98ebd0006d62a2ef
+oid sha256:23d40b2624d779257ec389b441c65d7ba5365bb86c0b6d5d3341cbf5dbc2cb25
 size 4976698672
model-00002-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:397431f907bc90da368dbea0328415e085b40cebb6bc9730d55e75047ef87e9f
+oid sha256:5d773178b71d3c0e544ad4a971e754a5a9eb6f884e882e80cdf9497287fc4ff8
 size 4999802720
model-00003-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8bf16cef92950d1440ce3bed33ce3f7ee90fc01d8c7f5aecd87495a79dc8cfe4
+oid sha256:47c2fbf539155e5796e525249ed99744f8f1453970313e6078b6becf20aa085a
 size 4915916176
model-00004-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:470d384d0d2efefe5a6829eae1ab3be5398373542c524c7a4d25818ef82edcab
+oid sha256:47d7b7ef31a88c44cc3fd39c43f91b4fe87a7a492f1e3ecee72dbefa4e479f49
 size 1168138808
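
The four .safetensors entries are Git LFS pointer files, so only the sha256 oid changes while the shard sizes stay identical: the weights were replaced, not resized. Below is a small sketch (not from the commit) for checking a downloaded shard against its pointer; the local file name is assumed.

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream the file so multi-gigabyte shards do not need to fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# oid taken from the new pointer for shard 1 of 4 above.
expected = "23d40b2624d779257ec389b441c65d7ba5365bb86c0b6d5d3341cbf5dbc2cb25"
assert sha256_of("model-00001-of-00004.safetensors") == expected
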
special_tokens_map.json CHANGED
@@ -1,8 +1,7 @@
 {
   "additional_special_tokens": [
     "<|im_start|>",
-    "<|im_end|>",
-    "<|reserved_special_token_0|>"
+    "<|im_end|>"
   ],
   "bos_token": {
     "content": "<|im_start|>",
@@ -19,7 +18,7 @@
     "single_word": false
   },
   "pad_token": {
-    "content": "<|reserved_special_token_0|>",
+    "content": "<|im_start|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
tokenizer.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b287c484fbf8378e3e9e98188bf9055b077fa83f995bb36e724af59ae1496905
-size 9084453
+oid sha256:8e7cad82642921f006bd0a63b22c613ccbfcf663e81a42da76a476e69bafc8df
+size 9084486
tokenizer_config.json CHANGED
@@ -9,7 +9,7 @@
       "special": true
     },
     "128001": {
-      "content": "<|im_end|>",
+      "content": "<|end_of_text|>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -73,7 +73,7 @@
       "special": true
     },
     "128009": {
-      "content": "<|eot_id|>",
+      "content": "<|im_end|>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -2051,8 +2051,7 @@
   },
   "additional_special_tokens": [
     "<|im_start|>",
-    "<|im_end|>",
-    "<|reserved_special_token_0|>"
+    "<|im_end|>"
   ],
   "bos_token": "<|im_start|>",
   "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = 'Ты — Сайга, русскоязычный автоматический ассистент. Ты разговариваешь с людьми и помогаешь им.' %}{% endif %}{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in loop_messages %}{% if loop.index0 == 0 %}{{'<|im_start|>system\n' + system_message + '<|im_end|>\n'}}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
@@ -2063,7 +2062,7 @@
     "attention_mask"
   ],
   "model_max_length": 8192,
-  "pad_token": "<|reserved_special_token_0|>",
+  "pad_token": "<|im_start|>",
   "padding_side": "left",
   "tokenizer_class": "PreTrainedTokenizerFast"
 }
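
Taken together, the tokenizer changes remap id 128009 to <|im_end|> so it can serve as EOS, drop <|reserved_special_token_0|>, and reuse <|im_start|> as the pad token, while the chat_template keeps the ChatML-style format with the Russian Saiga system prompt. A minimal sketch of the resulting behaviour; the repository id is an assumption, a local checkout of these files works equally well.

from transformers import AutoTokenizer

# Repo id is assumed for illustration.
tokenizer = AutoTokenizer.from_pretrained("IlyaGusev/saiga_llama3_8b")

print(tokenizer.convert_tokens_to_ids("<|im_end|>"))  # 128009 after this commit
print(tokenizer.pad_token)                             # '<|im_start|>'

messages = [{"role": "user", "content": "Привет!"}]
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)
# <|im_start|>system
# Ты — Сайга, русскоязычный автоматический ассистент. Ты разговариваешь с людьми и помогаешь им.<|im_end|>
# <|im_start|>user
# Привет!<|im_end|>
# <|im_start|>assistant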