jukofyork committed on
Commit 067e869
1 Parent(s): be8cf58

Copied modified versions of CodeLlama-70b-hf json files


The following changes were made:

- Changed "torch_dtype" to "float16" in the config.json file.
- Added the correct Llama/Mistral "chat_template" to the tokenizer_config.json file (a quick check of both changes is sketched below).
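
For reference, the copied files can be sanity-checked after cloning the repository locally. This is a minimal sketch, assuming the transformers library is installed; "./model" is a placeholder for wherever the repo is cloned:

    from transformers import AutoConfig, AutoTokenizer

    path = "./model"  # placeholder for the local clone of this repository

    config = AutoConfig.from_pretrained(path)
    print(config.torch_dtype)              # expected: float16
    print(config.max_position_embeddings)  # expected: 16384
    print(config.rope_theta)               # expected: 1000000

    tokenizer = AutoTokenizer.from_pretrained(path)
    print(tokenizer.chat_template is not None)  # expected: True once this commit is applied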

config.json CHANGED
@@ -1,5 +1,4 @@
 {
-  "_name_or_path": "openbmb/Eurus-70b-sft",
   "architectures": [
     "LlamaForCausalLM"
   ],
@@ -11,7 +10,7 @@
   "hidden_size": 8192,
   "initializer_range": 0.02,
   "intermediate_size": 28672,
-  "max_position_embeddings": 4096,
+  "max_position_embeddings": 16384,
   "model_type": "llama",
   "num_attention_heads": 64,
   "num_hidden_layers": 80,
@@ -19,10 +18,10 @@
   "pretraining_tp": 1,
   "rms_norm_eps": 1e-05,
   "rope_scaling": null,
-  "rope_theta": 10000,
+  "rope_theta": 1000000,
   "tie_word_embeddings": false,
   "torch_dtype": "float16",
-  "transformers_version": "4.35.0",
+  "transformers_version": "4.37.1",
   "use_cache": true,
   "vocab_size": 32016
 }
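
The max_position_embeddings and rope_theta values above are the CodeLlama long-context settings rather than the Llama-2 defaults. A rough numerical sketch of why the larger RoPE base matters, assuming the standard rotary-embedding frequency formula and a head dimension of 128 (hidden_size 8192 / 64 attention heads):

    import numpy as np

    def rope_inv_freq(theta, dim=128):
        # Standard rotary-embedding inverse frequencies: 1 / theta**(2i/dim).
        return 1.0 / (theta ** (np.arange(0, dim, 2) / dim))

    old_freqs = rope_inv_freq(10_000)     # previous config value
    new_freqs = rope_inv_freq(1_000_000)  # value set by this commit

    # The wavelength (in token positions) of the slowest-rotating pair grows by
    # roughly two orders of magnitude, which is what lets the model address the
    # 16384-token window above without any "rope_scaling" entry.
    print(2 * np.pi / old_freqs[-1], 2 * np.pi / new_freqs[-1])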
generation_config.json CHANGED
@@ -1,9 +1,6 @@
 {
   "_from_model_config": true,
   "bos_token_id": 1,
-  "eos_token_id": [
-    2,
-    32015
-  ],
-  "transformers_version": "4.35.0"
+  "eos_token_id": 2,
+  "transformers_version": "4.37.1"
 }
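
With the list trimmed to a single id, generation now stops only on </s> (id 2); the old <step> id 32015 is gone, matching its removal from tokenizer_config.json below. A quick check, with "./model" again standing in for a local clone:

    from transformers import GenerationConfig

    gen_cfg = GenerationConfig.from_pretrained("./model")  # placeholder path
    print(gen_cfg.eos_token_id)  # expected: 2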
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,4 +1,6 @@
 {
+  "add_bos_token": true,
+  "add_eos_token": false,
   "added_tokens_decoder": {
     "0": {
       "content": "<unk>",
@@ -23,18 +25,9 @@
       "rstrip": false,
       "single_word": false,
       "special": true
-    },
-    "32015": {
-      "content": "<step>",
-      "lstrip": true,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": true,
-      "special": false
     }
   },
   "bos_token": "<s>",
-  "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token + ' ' }}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "</s>",
   "legacy": true,
@@ -44,5 +37,6 @@
   "spaces_between_special_tokens": false,
   "tokenizer_class": "LlamaTokenizer",
   "unk_token": "<unk>",
-  "use_default_system_prompt": false
+  "use_default_system_prompt": false,
+  "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}"
 }
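
To see what the added template produces, here is a short rendering sketch with toy messages ("./model" is again a placeholder for a local clone of this repository):

    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("./model")  # placeholder path
    messages = [
        {"role": "user", "content": "Hello"},
        {"role": "assistant", "content": "Hi there."},
        {"role": "user", "content": "Write a bubble sort in Python."},
    ]
    prompt = tokenizer.apply_chat_template(messages, tokenize=False)
    print(prompt)
    # Expected shape of the rendered prompt:
    # <s>[INST] Hello [/INST]Hi there.</s>[INST] Write a bubble sort in Python. [/INST]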