Downtown-Case committed on
Commit 65f8153
1 Parent(s): ea72df6

Upload folder using huggingface_hub

config.json ADDED
@@ -0,0 +1,43 @@
+ {
+   "_name_or_path": "/home/alpha/Models/Raw/internlm_internlm2_5-7b-chat-1m",
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attn_implementation": "eager",
+   "bias": false,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 14336,
+   "max_position_embeddings": 262144,
+   "model_type": "llama",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 8,
+   "pad_token_id": 2,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": {
+     "factor": 2.5,
+     "type": "dynamic"
+   },
+   "rope_theta": 50000000,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.42.3",
+   "use_cache": true,
+   "vocab_size": 92544,
+   "quantization_config": {
+     "quant_method": "exl2",
+     "version": "0.1.5",
+     "bits": 6.0,
+     "head_bits": 6,
+     "calibration": {
+       "rows": 100,
+       "length": 2048,
+       "dataset": "(default)"
+     }
+   }
+ }
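Note the two non-stock fields above: `rope_scaling` applies dynamic NTK scaling with factor 2.5 on top of the 262,144-token base context, and `quantization_config` marks this as an exl2 quant (6.0 bits per weight, 6-bit head), so the weights are meant for ExLlamaV2 rather than plain `transformers`. A minimal loading sketch, assuming the `exllamav2` Python package (~0.1.x API; the model path is a placeholder, not part of this repo):

```python
# Minimal ExLlamaV2 loading sketch -- illustrative, API as of exllamav2 ~0.1.x.
from exllamav2 import ExLlamaV2, ExLlamaV2Config, ExLlamaV2Cache, ExLlamaV2Tokenizer

config = ExLlamaV2Config()
config.model_dir = "path/to/this/repo"  # placeholder: local download of this repo
config.prepare()                        # parses config.json, incl. quantization_config

model = ExLlamaV2(config)
cache = ExLlamaV2Cache(model, lazy=True)  # allocated while the model loads
model.load_autosplit(cache)               # split layers across available GPUs
tokenizer = ExLlamaV2Tokenizer(config)
```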
convert_weights_internlm.py ADDED
@@ -0,0 +1,101 @@
+ #!/usr/bin/env python3
+ # 1/17/2024
+ # Charles O. Goddard
+ # https://huggingface.co/chargoddard/internlm2-7b-llama/raw/main/convert_weights.py
+ """Convert internlm2 weights to Llama format."""
+
+ import json
+ import os
+ import einops
+ import tqdm
+ from mergekit.io import LazyTensorLoader, TensorWriter
+ from mergekit.common import ModelReference
+ from transformers import LlamaTokenizer
+
+ MODEL_IN = "raw weights"
+ OUT_PATH = "llamafied weights"
+
+ model_ref = ModelReference.parse(MODEL_IN)
+ cfg = model_ref.config(trust_remote_code=True)
+ head_dim = cfg.hidden_size // cfg.num_attention_heads
+ num_key_value_groups = cfg.num_attention_heads // cfg.num_key_value_heads
+ loader = LazyTensorLoader(model_ref.tensor_index(), lazy_unpickle=True)
+ writer = TensorWriter(OUT_PATH)
+
+ SIMPLE_REPLACEMENTS = {
+     "feed_forward.w1": "mlp.gate_proj",
+     "feed_forward.w2": "mlp.down_proj",
+     "feed_forward.w3": "mlp.up_proj",
+     "attention.wo": "self_attn.o_proj",
+     "ffn_norm": "post_attention_layernorm",
+     "attention_norm": "input_layernorm",
+     "tok_embeddings": "embed_tokens",
+     "output.weight": "lm_head.weight",
+ }
+
+ for tensor_name in tqdm.tqdm(loader.index.tensor_paths):
+     tensor = loader.get_tensor(tensor_name)
+     if "attention.wqkv" in tensor_name:
+         # make me think about tensor shapes will you >:(
+
+         # ((cfg.num_attention_heads + 2 * cfg.num_key_value_heads) * head_dim, cfg.hidden_size) x (batch_sz, sq_len, cfg.hidden_size)
+         # -> (batch_sz, sq_len, (cfg.num_attention_heads + 2 * cfg.num_key_value_heads) * head_dim)
+         # qkv_states = rearrange(
+         #     qkv_states,
+         #     "b q (h gs d) -> b q h gs d",
+         #     gs=2 + self.num_key_value_groups,
+         #     d=self.head_dim,
+         # )
+         # -> (batch_sz, sq_len, h, 2 + self.num_key_value_groups, head_dim)
+         qkv_vecs = einops.rearrange(
+             tensor, "(h gs d) z -> h gs d z", gs=2 + num_key_value_groups, d=head_dim
+         )
+         q_proj = (
+             qkv_vecs[:, :num_key_value_groups, ...]
+             .reshape(-1, cfg.hidden_size)
+             .contiguous()
+         )
+         k_proj = qkv_vecs[:, -2, ...].reshape(-1, cfg.hidden_size).contiguous()
+         v_proj = qkv_vecs[:, -1, ...].reshape(-1, cfg.hidden_size).contiguous()
+         assert k_proj.shape == v_proj.shape
+
+         writer.save_tensor(
+             tensor_name.replace("attention.wqkv", "self_attn.q_proj"),
+             q_proj,
+             clone=True,
+         )
+         writer.save_tensor(
+             tensor_name.replace("attention.wqkv", "self_attn.k_proj"),
+             k_proj,
+             clone=True,
+         )
+         writer.save_tensor(
+             tensor_name.replace("attention.wqkv", "self_attn.v_proj"),
+             v_proj,
+             clone=True,
+         )
+         continue
+
+     out_name = tensor_name
+     for pattern, sub in SIMPLE_REPLACEMENTS.items():
+         if pattern in out_name:
+             out_name = out_name.replace(pattern, sub)
+     writer.save_tensor(out_name, tensor)
+ writer.finalize()
+
+ cfg_dict = json.loads(cfg.to_json_string())
+ del cfg_dict["auto_map"]
+ cfg_dict["architectures"] = ["LlamaForCausalLM"]
+ cfg_dict["model_type"] = "llama"
+ if "rope_scaling" in cfg_dict and cfg_dict["rope_scaling"]["factor"] == 1.0:
+     del cfg_dict["rope_scaling"]
+ with open(os.path.join(OUT_PATH, "config.json"), "w", encoding="utf-8") as fp:
+     json.dump(cfg_dict, fp, indent=2)
+
+ # InternLMTokenizer differences:
+ # 1. clean_up_tokenization() hardcoded to always be called
+ # 2. might prepend a space to some tokens that LlamaTokenizer doesn't if they're the first token
+ # 1 is easy to fix, 2... is not important
+ tok = LlamaTokenizer.from_pretrained(MODEL_IN, trust_remote_code=False, legacy=True)
+ tok.clean_up_tokenization_spaces = True
+ tok.save_pretrained(OUT_PATH)
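The interesting step in the script is unpacking InternLM2's fused `attention.wqkv` weight into separate Llama-style q/k/v projections: along the first axis, each key/value head is packed together with its `num_key_value_groups` query heads, followed by the key row block and then the value row block. A standalone shape check on random tensors (sizes taken from this model's config; not part of the original script) illustrates the einops pattern:

```python
# Shape sanity check for the wqkv unpacking above, on random data.
import torch
import einops

hidden_size, n_heads, n_kv_heads = 4096, 32, 8   # from config.json
head_dim = hidden_size // n_heads                # 128
groups = n_heads // n_kv_heads                   # 4 query heads per kv head

wqkv = torch.randn((n_heads + 2 * n_kv_heads) * head_dim, hidden_size)
qkv = einops.rearrange(wqkv, "(h gs d) z -> h gs d z", gs=2 + groups, d=head_dim)

q = qkv[:, :groups, ...].reshape(-1, hidden_size)  # first `groups` slots are queries
k = qkv[:, -2, ...].reshape(-1, hidden_size)       # second-to-last slot is keys
v = qkv[:, -1, ...].reshape(-1, hidden_size)       # last slot is values

assert q.shape == (n_heads * head_dim, hidden_size)                # (4096, 4096)
assert k.shape == v.shape == (n_kv_heads * head_dim, hidden_size)  # (1024, 4096)
```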
model.safetensors.index.json ADDED
@@ -0,0 +1 @@
+ {"metadata": {"mergekit_version": "0.0.4.4", "total_size": 15475417088}, "weight_map": {"model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors", "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors", "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors", "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors", "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", "model.layers.10.input_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", "model.layers.10.mlp.down_proj.weight": "model-00001-of-00004.safetensors", "model.layers.10.mlp.up_proj.weight": "model-00001-of-00004.safetensors", "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", "model.layers.11.input_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", "model.layers.11.mlp.down_proj.weight": "model-00001-of-00004.safetensors", "model.layers.11.mlp.up_proj.weight": "model-00001-of-00004.safetensors", "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", "model.layers.12.input_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", "model.layers.12.mlp.down_proj.weight": "model-00001-of-00004.safetensors", "model.layers.12.mlp.up_proj.weight": "model-00001-of-00004.safetensors", 
"model.layers.12.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", "model.layers.13.input_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", "model.layers.13.mlp.down_proj.weight": "model-00001-of-00004.safetensors", "model.layers.13.mlp.up_proj.weight": "model-00001-of-00004.safetensors", "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", "model.layers.14.input_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", "model.layers.14.mlp.down_proj.weight": "model-00001-of-00004.safetensors", "model.layers.14.mlp.up_proj.weight": "model-00001-of-00004.safetensors", "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", "model.layers.15.input_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", "model.layers.15.mlp.down_proj.weight": "model-00001-of-00004.safetensors", "model.layers.15.mlp.up_proj.weight": "model-00001-of-00004.safetensors", "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", "model.layers.16.input_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", "model.layers.16.mlp.down_proj.weight": "model-00001-of-00004.safetensors", "model.layers.16.mlp.up_proj.weight": "model-00001-of-00004.safetensors", "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", "model.layers.17.input_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", "model.layers.17.mlp.down_proj.weight": "model-00001-of-00004.safetensors", "model.layers.17.mlp.up_proj.weight": "model-00001-of-00004.safetensors", 
"model.layers.17.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", "model.layers.18.input_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", "model.layers.18.mlp.down_proj.weight": "model-00001-of-00004.safetensors", "model.layers.18.mlp.up_proj.weight": "model-00001-of-00004.safetensors", "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", "model.layers.19.input_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", "model.layers.19.mlp.down_proj.weight": "model-00002-of-00004.safetensors", "model.layers.19.mlp.up_proj.weight": "model-00002-of-00004.safetensors", "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.2.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", "model.layers.2.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", "model.layers.2.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", "model.layers.2.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", "model.layers.2.input_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.2.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", "model.layers.2.mlp.down_proj.weight": "model-00002-of-00004.safetensors", "model.layers.2.mlp.up_proj.weight": "model-00002-of-00004.safetensors", "model.layers.2.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", "model.layers.20.input_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", "model.layers.20.mlp.down_proj.weight": "model-00002-of-00004.safetensors", "model.layers.20.mlp.up_proj.weight": "model-00002-of-00004.safetensors", "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", "model.layers.21.input_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", "model.layers.21.mlp.down_proj.weight": "model-00002-of-00004.safetensors", "model.layers.21.mlp.up_proj.weight": "model-00002-of-00004.safetensors", 
"model.layers.21.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", "model.layers.22.input_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", "model.layers.22.mlp.down_proj.weight": "model-00002-of-00004.safetensors", "model.layers.22.mlp.up_proj.weight": "model-00002-of-00004.safetensors", "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", "model.layers.23.input_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", "model.layers.23.mlp.down_proj.weight": "model-00002-of-00004.safetensors", "model.layers.23.mlp.up_proj.weight": "model-00002-of-00004.safetensors", "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", "model.layers.24.input_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", "model.layers.24.mlp.down_proj.weight": "model-00002-of-00004.safetensors", "model.layers.24.mlp.up_proj.weight": "model-00002-of-00004.safetensors", "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", "model.layers.25.input_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", "model.layers.25.mlp.down_proj.weight": "model-00002-of-00004.safetensors", "model.layers.25.mlp.up_proj.weight": "model-00002-of-00004.safetensors", "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", "model.layers.26.input_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", "model.layers.26.mlp.down_proj.weight": "model-00002-of-00004.safetensors", "model.layers.26.mlp.up_proj.weight": "model-00002-of-00004.safetensors", 
"model.layers.26.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", "model.layers.27.input_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", "model.layers.27.mlp.down_proj.weight": "model-00002-of-00004.safetensors", "model.layers.27.mlp.up_proj.weight": "model-00002-of-00004.safetensors", "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.28.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", "model.layers.28.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", "model.layers.28.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", "model.layers.28.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", "model.layers.28.input_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.28.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", "model.layers.28.mlp.down_proj.weight": "model-00002-of-00004.safetensors", "model.layers.28.mlp.up_proj.weight": "model-00002-of-00004.safetensors", "model.layers.28.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.29.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", "model.layers.29.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", "model.layers.29.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", "model.layers.29.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", "model.layers.29.input_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.29.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", "model.layers.29.mlp.down_proj.weight": "model-00002-of-00004.safetensors", "model.layers.29.mlp.up_proj.weight": "model-00003-of-00004.safetensors", "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.3.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", "model.layers.3.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", "model.layers.3.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", "model.layers.3.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", "model.layers.3.input_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.3.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", "model.layers.3.mlp.down_proj.weight": "model-00003-of-00004.safetensors", "model.layers.3.mlp.up_proj.weight": "model-00003-of-00004.safetensors", "model.layers.3.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", "model.layers.30.input_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", "model.layers.30.mlp.down_proj.weight": "model-00003-of-00004.safetensors", "model.layers.30.mlp.up_proj.weight": "model-00003-of-00004.safetensors", 
"model.layers.30.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", "model.layers.31.input_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", "model.layers.31.mlp.down_proj.weight": "model-00003-of-00004.safetensors", "model.layers.31.mlp.up_proj.weight": "model-00003-of-00004.safetensors", "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.4.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", "model.layers.4.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", "model.layers.4.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", "model.layers.4.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", "model.layers.4.input_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.4.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", "model.layers.4.mlp.down_proj.weight": "model-00003-of-00004.safetensors", "model.layers.4.mlp.up_proj.weight": "model-00003-of-00004.safetensors", "model.layers.4.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.5.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", "model.layers.5.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", "model.layers.5.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", "model.layers.5.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", "model.layers.5.input_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.5.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", "model.layers.5.mlp.down_proj.weight": "model-00003-of-00004.safetensors", "model.layers.5.mlp.up_proj.weight": "model-00003-of-00004.safetensors", "model.layers.5.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.6.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", "model.layers.6.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", "model.layers.6.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", "model.layers.6.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", "model.layers.6.input_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.6.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", "model.layers.6.mlp.down_proj.weight": "model-00003-of-00004.safetensors", "model.layers.6.mlp.up_proj.weight": "model-00003-of-00004.safetensors", "model.layers.6.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.7.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", "model.layers.7.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", "model.layers.7.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", "model.layers.7.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", "model.layers.7.input_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.7.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", "model.layers.7.mlp.down_proj.weight": "model-00003-of-00004.safetensors", "model.layers.7.mlp.up_proj.weight": "model-00003-of-00004.safetensors", "model.layers.7.post_attention_layernorm.weight": 
"model-00003-of-00004.safetensors", "model.layers.8.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", "model.layers.8.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", "model.layers.8.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", "model.layers.8.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", "model.layers.8.input_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.8.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", "model.layers.8.mlp.down_proj.weight": "model-00003-of-00004.safetensors", "model.layers.8.mlp.up_proj.weight": "model-00003-of-00004.safetensors", "model.layers.8.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.9.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", "model.layers.9.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", "model.layers.9.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", "model.layers.9.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", "model.layers.9.input_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.9.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", "model.layers.9.mlp.down_proj.weight": "model-00003-of-00004.safetensors", "model.layers.9.mlp.up_proj.weight": "model-00003-of-00004.safetensors", "model.layers.9.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", "model.norm.weight": "model-00003-of-00004.safetensors", "model.embed_tokens.weight": "model-00003-of-00004.safetensors", "lm_head.weight": "model-00004-of-00004.safetensors"}}
output-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7d5782458335220c3e954300f04cb207d4e823883e185a64c1b392499450e3f0
+ size 4262967508
output-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9cf111584d5280196e32028bd45565cec3be2b151b53e527348b81029b2af714
+ size 2026454576
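Both `output-*.safetensors` entries are git-lfs pointer files: the actual payload is stored by content hash and fetched on checkout, with the pointer recording only the sha256 `oid` and byte `size`. A downloaded shard can be verified against its pointer with a streaming hash, for instance:

```python
# Verify a downloaded LFS object against the oid recorded in its pointer file.
import hashlib

def sha256sum(path: str, bufsize: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(bufsize):  # hash in 1 MiB chunks to bound memory
            h.update(chunk)
    return h.hexdigest()

expected = "7d5782458335220c3e954300f04cb207d4e823883e185a64c1b392499450e3f0"
assert sha256sum("output-00001-of-00002.safetensors") == expected
```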
special_tokens_map.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|action_start|>",
+     "<|action_end|>",
+     "<|interpreter|>",
+     "<|plugin|>"
+   ],
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f868398fc4e05ee1e8aeba95ddf18ddcc45b8bce55d5093bead5bbf80429b48b
+ size 1477754
tokenizer_config.json ADDED
@@ -0,0 +1,106 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "add_prefix_space": true,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92538": {
+       "content": "<|plugin|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92539": {
+       "content": "<|interpreter|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92540": {
+       "content": "<|action_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92541": {
+       "content": "<|action_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92542": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92543": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|action_start|>",
+     "<|action_end|>",
+     "<|interpreter|>",
+     "<|plugin|>"
+   ],
+   "auto_map": {
+     "AutoTokenizer": [
+       "tokenization_internlm2.InternLM2Tokenizer",
+       "tokenization_internlm2_fast.InternLM2TokenizerFast"
+     ]
+   },
+   "bos_token": "<s>",
+   "chat_template": "{{ bos_token }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+   "clean_up_tokenization_spaces": true,
+   "decode_with_prefix_space": false,
+   "eos_token": "</s>",
+   "legacy": true,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "</s>",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
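The `chat_template` encodes the ChatML-style turn format (`<|im_start|>role\n...<|im_end|>`) that InternLM2.5 chat models expect, and since `tokenizer_class` is now `LlamaTokenizer`, stock `transformers` can render it without remote code. A usage sketch (the repo path is a placeholder):

```python
# Render a prompt with the tokenizer's built-in chat template.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this/repo")  # placeholder path
messages = [{"role": "user", "content": "Hello!"}]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# <s><|im_start|>user
# Hello!<|im_end|>
# <|im_start|>assistant
```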