sanchit-gandhi
committed
Upload folder using huggingface_hub
Uploading initialised weights and configs
- config.json +26 -0
- generation_config.json +6 -0
- model-00001-of-00002.safetensors +3 -0
- model-00002-of-00002.safetensors +3 -0
- model.safetensors.index.json +64 -0
- run.sh +18 -0
- run_initialisation.py +200 -0
- run_initialization.py +200 -0
- special_tokens_map.json +23 -0
- tokenizer.json +0 -0
- tokenizer.model +3 -0
- tokenizer_config.json +43 -0
config.json
ADDED
@@ -0,0 +1,26 @@
+{
+  "_name_or_path": "mistralai/Mistral-7B-Instruct-v0.2",
+  "architectures": [
+    "MistralForCausalLM"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "initializer_range": 0.02,
+  "intermediate_size": 14336,
+  "max_position_embeddings": 32768,
+  "model_type": "mistral",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 6,
+  "num_key_value_heads": 8,
+  "rms_norm_eps": 1e-05,
+  "rope_theta": 1000000.0,
+  "sliding_window": null,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float32",
+  "transformers_version": "4.40.0.dev0",
+  "use_cache": true,
+  "vocab_size": 32000
+}
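Taken together, this config describes a 6-layer student cut down from the 32-layer Mistral-7B-Instruct-v0.2 teacher: hidden size, head counts and vocabulary are unchanged, only num_hidden_layers is reduced. A minimal sketch (not part of the commit, assuming the files in this commit are in the working directory) of how the config is consumed:

# Sketch: instantiate a randomly-initialised model from the config above;
# the script further down replaces the random weights with teacher layers.
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("./")          # reads config.json above
model = AutoModelForCausalLM.from_config(config)   # random weights, float32
print(model.config.num_hidden_layers)              # 6
print(sum(p.numel() for p in model.parameters()))  # 1570820096 (~1.57 B)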
generation_config.json
ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "transformers_version": "4.40.0.dev0"
+}
model-00001-of-00002.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:80b7693526adafb752e3c2ce80ff926a48b241c822c77a6bcb5783aefd3c85e4
+size 4987196936
model-00002-of-00002.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8f06b5e37485246a87b6b0f180265cba5d6797760e662d59d1370ff85fff74a8
+size 1296089984
model.safetensors.index.json
ADDED
@@ -0,0 +1,64 @@
+{
+  "metadata": {
+    "total_size": 6283280384
+  },
+  "weight_map": {
+    "lm_head.weight": "model-00002-of-00002.safetensors",
+    "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+    "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.5.input_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.5.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.5.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.5.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.5.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.5.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.norm.weight": "model-00002-of-00002.safetensors"
+  }
+}
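A consistency check: total_size is 6,283,280,384 bytes, which at 4 bytes per float32 parameter is exactly 1,570,820,096 parameters (~1.57 B) for the 6-layer model. The two shard files are a few kilobytes larger in total because each safetensors file also carries a small JSON header. The weight map lets loaders fetch an individual tensor from the right shard; a minimal sketch (not part of the commit, assuming the index and shards are downloaded locally):

# Sketch: resolve a parameter name to its shard and read just that tensor,
# without loading the full 6.3 GB checkpoint into memory.
import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.layers.5.mlp.down_proj.weight"
shard = index["weight_map"][name]        # "model-00002-of-00002.safetensors"
with safe_open(shard, framework="pt") as f:
    tensor = f.get_tensor(name)
print(tensor.shape)                      # torch.Size([4096, 14336])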
run.sh
ADDED
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+#SBATCH --partition=hopper-cpu
+#SBATCH --job-name=mistral-init
+#SBATCH --mem=1g
+#SBATCH --time=1:00:00
+#SBATCH --cpus-per-task=1
+#SBATCH -o /fsx/sanchit/logs/init-%j-%x.out
+
+echo "Starting job"
+srun python3 run_initialization.py \
+    --model_name_or_path "mistralai/Mistral-7B-Instruct-v0.2" \
+    --num_hidden_layers "6" \
+    --initialization_strategy "first_n" \
+    --output_dir "./" \
+    --push_to_hub
+wait
+
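The initialisation runs as a SLURM batch job, submitted with `sbatch run.sh`. It invokes run_initialization.py (the same script appears in this commit under both the British and American spellings) with the first_n strategy, matching the 6-layer checkpoint uploaded here; stdout lands in /fsx/sanchit/logs/init-<job-id>-<job-name>.out via the %j and %x placeholders.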
run_initialisation.py
ADDED
@@ -0,0 +1,200 @@
+import copy
+import logging
+import os
+import sys
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Optional
+
+import numpy as np
+import torch
+from huggingface_hub import create_repo, get_full_repo_name, upload_folder
+from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
+
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class ModelArguments:
+    """
+    Arguments pertaining to which model/config/tokenizer we are going to initialise.
+    """
+
+    model_name_or_path: Optional[str] = field(
+        metadata={"help": "The teacher checkpoint for weights initialization"},
+    )
+    output_dir: str = field(
+        metadata={"help": "The output directory where the student checkpoint will be written."},
+    )
+    model_revision: Optional[str] = field(
+        default="main",
+        metadata={"help": "The specific teacher model version to use (can be a branch name, tag name or commit id)."},
+    )
+    cache_dir: Optional[str] = field(
+        default=None,
+        metadata={"help": "Where to store the pre-trained models downloaded from huggingface.co"},
+    )
+    subfolder: Optional[str] = field(
+        default="",
+        metadata={
+            "help": "In case the relevant files are located inside a subfolder of the teacher model repo on huggingface.co, you can "
+            "specify the folder name here."
+        },
+    )
+    torch_dtype: Optional[str] = field(
+        default=None,
+        metadata={
+            "help": (
+                "Override the default `torch.dtype` and load the teacher model under this dtype. If `auto` is passed, the "
+                "dtype will be automatically derived from the model's weights."
+            ),
+            "choices": ["auto", "bfloat16", "float16", "float32"],
+        },
+    )
+    trust_remote_code: Optional[bool] = field(
+        default=False, metadata={"help": "Trust remote code when loading a model."}
+    )
+    token: Optional[bool] = field(
+        default=True,
+        metadata={
+            "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script with private models)."
+        },
+    )
+    num_hidden_layers: Optional[int] = field(
+        default=6,
+        metadata={"help": "The number of hidden layers in the Transformer decoder."},
+    )
+    push_to_hub: Optional[bool] = field(
+        default=False, metadata={"help": "Whether or not to upload the trained model to the model hub after training."}
+    )
+    hub_model_id: Optional[str] = field(
+        default=None, metadata={"help": "The name of the repository to keep in sync with the local `output_dir`."}
+    )
+    low_cpu_mem_usage: Optional[bool] = field(
+        default=True,
+        metadata={
+            "help": "Create the teacher model as an empty shell, and only materialize its parameters when the pretrained weights are loaded. "
+            "Significantly benefits loading time and RAM consumption."
+        },
+    )
+    initialization_strategy: Optional[str] = field(
+        default="maximally_spaced",
+        metadata={
+            "help": "The weight initialization strategy for the decoder weights. Either `first_n`, or `maximally_spaced`."
+        },
+    )
+
+
+def main():
+    # 1. Parse input arguments
+    parser = HfArgumentParser(ModelArguments)
+    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
+        # If we pass only one argument to the script and it's the path to a json file,
+        # let's parse it to get our arguments.
+        model_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))[0]
+    else:
+        model_args = parser.parse_args_into_dataclasses()[0]
+
+    logger.info(f"Model parameters {model_args}")
+
+    logger.info("*** Load pretrained teacher model ***")
+    torch_dtype = (
+        model_args.torch_dtype if model_args.torch_dtype in ["auto", None] else getattr(torch, model_args.torch_dtype)
+    )
+    # quantization_config = get_quantization_config(model_args)
+
+    teacher_model = AutoModelForCausalLM.from_pretrained(
+        model_args.model_name_or_path,
+        torch_dtype=torch_dtype,
+        low_cpu_mem_usage=model_args.low_cpu_mem_usage,
+        revision=model_args.model_revision,
+        cache_dir=model_args.cache_dir,
+        subfolder=model_args.subfolder,
+        trust_remote_code=model_args.trust_remote_code,
+        token=model_args.token,
+        # device_map=get_kbit_device_map() if quantization_config is not None else None,
+        # quantization_config=quantization_config,
+    )
+    tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path)
+    generation_config = teacher_model.generation_config
+    teacher_config = teacher_model.config
+
+    logger.info("*** Teacher model loaded! ***")
+
+    student_config = copy.deepcopy(teacher_config)
+    student_config.num_hidden_layers = model_args.num_hidden_layers
+    teacher_hidden_layers = teacher_config.num_hidden_layers
+
+    if model_args.initialization_strategy == "maximally_spaced":
+        decoder_mapping = np.linspace(0, teacher_hidden_layers - 1, student_config.num_hidden_layers, dtype=int)
+    elif model_args.initialization_strategy == "first_n":
+        decoder_mapping = np.arange(0, student_config.num_hidden_layers)
+    else:
+        raise ValueError(
+            f"Got invalid initialization_strategy '{model_args.initialization_strategy}', should be one of "
+            "`maximally_spaced` or `first_n`."
+        )
+    # always use the last teacher layer as the last student layer
+    decoder_mapping[-1] = teacher_hidden_layers - 1
+
+    decoder_map = {}
+    for student_layer, teacher_layer in enumerate(decoder_mapping):
+        decoder_map[teacher_layer] = student_layer
+
+    # init the student params from the teacher model
+    logger.info("*** Load and initialise student model ***")
+    student_model = AutoModelForCausalLM.from_config(student_config)
+    missing_keys, unexpected_keys = student_model.load_state_dict(teacher_model.state_dict(), strict=False)
+    student_model.to(dtype=torch_dtype)
+    if len(missing_keys) > 0:
+        raise RuntimeError(
+            f"Error(s) in loading state_dict for {student_model.__class__.__name__}. \n"
+            f"Missing key(s) in state_dict: {missing_keys}"
+        )
+    if student_config.num_hidden_layers == teacher_hidden_layers:
+        decoder_keys = [key for key in unexpected_keys if "model.layers" in key]
+        if len(decoder_keys) > 0:
+            raise RuntimeError(
+                f"Error(s) in loading state_dict for {student_model.__class__.__name__}. \n"
+                f"Unexpected key(s) in state_dict: {decoder_keys}"
+            )
+
+    for layer in range(teacher_hidden_layers):
+        if layer in decoder_map:
+            # re-introduce pre-defined layers from the teacher
+            student_model.model.layers[decoder_map[layer]].load_state_dict(
+                teacher_model.model.layers[layer].state_dict()
+            )
+
+    logger.info("*** Student model loaded! ***")
+
+    # remove the teacher params and model
+    del teacher_model
+
+    # save the converted weights and model
+    if model_args.output_dir is not None:
+        student_model.save_pretrained(model_args.output_dir)
+        # we also need to correctly save the processor and generation config
+        tokenizer.save_pretrained(model_args.output_dir)
+        generation_config.save_pretrained(model_args.output_dir)
+
+    if model_args.push_to_hub:
+        if model_args.hub_model_id is None:
+            repo_name = get_full_repo_name(
+                Path(model_args.output_dir).absolute().name,
+                token=model_args.token,
+            )
+        else:
+            repo_name = model_args.hub_model_id
+        create_repo(repo_name, exist_ok=True, token=model_args.token)
+        upload_folder(
+            repo_id=repo_name,
+            folder_path=model_args.output_dir,
+            commit_description="Uploading initialised weights and configs",
+        )
+
+
+if __name__ == "__main__":
+    main()
+
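The heart of the script is the teacher-to-student layer mapping. A small sketch (not part of the commit) of the two strategies for a 32-layer teacher and a 6-layer student, using the same numpy calls the script uses:

# Sketch: which teacher layers initialise the student under each strategy.
import numpy as np

teacher_layers, student_layers = 32, 6

# "maximally_spaced": evenly spaced teacher layers
spaced = np.linspace(0, teacher_layers - 1, student_layers, dtype=int)
print(spaced)   # [ 0  6 12 18 24 31]

# "first_n": the first N teacher layers, except that the script always
# overrides the last student layer with the final teacher layer
first_n = np.arange(0, student_layers)
first_n[-1] = teacher_layers - 1
print(first_n)  # [ 0  1  2  3  4 31]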
run_initialization.py
ADDED
@@ -0,0 +1,200 @@
+import copy
+import logging
+import os
+import sys
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Optional
+
+import numpy as np
+import torch
+from huggingface_hub import create_repo, get_full_repo_name, upload_folder
+from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
+
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class ModelArguments:
+    """
+    Arguments pertaining to which model/config/tokenizer we are going to initialise.
+    """
+
+    model_name_or_path: Optional[str] = field(
+        metadata={"help": "The teacher checkpoint for weights initialization"},
+    )
+    output_dir: str = field(
+        metadata={"help": "The output directory where the student checkpoint will be written."},
+    )
+    model_revision: Optional[str] = field(
+        default="main",
+        metadata={"help": "The specific teacher model version to use (can be a branch name, tag name or commit id)."},
+    )
+    cache_dir: Optional[str] = field(
+        default=None,
+        metadata={"help": "Where to store the pre-trained models downloaded from huggingface.co"},
+    )
+    subfolder: Optional[str] = field(
+        default="",
+        metadata={
+            "help": "In case the relevant files are located inside a subfolder of the teacher model repo on huggingface.co, you can "
+            "specify the folder name here."
+        },
+    )
+    torch_dtype: Optional[str] = field(
+        default=None,
+        metadata={
+            "help": (
+                "Override the default `torch.dtype` and load the teacher model under this dtype. If `auto` is passed, the "
+                "dtype will be automatically derived from the model's weights."
+            ),
+            "choices": ["auto", "bfloat16", "float16", "float32"],
+        },
+    )
+    trust_remote_code: Optional[bool] = field(
+        default=False, metadata={"help": "Trust remote code when loading a model."}
+    )
+    token: Optional[bool] = field(
+        default=True,
+        metadata={
+            "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script with private models)."
+        },
+    )
+    num_hidden_layers: Optional[int] = field(
+        default=6,
+        metadata={"help": "The number of hidden layers in the Transformer decoder."},
+    )
+    push_to_hub: Optional[bool] = field(
+        default=False, metadata={"help": "Whether or not to upload the trained model to the model hub after training."}
+    )
+    hub_model_id: Optional[str] = field(
+        default=None, metadata={"help": "The name of the repository to keep in sync with the local `output_dir`."}
+    )
+    low_cpu_mem_usage: Optional[bool] = field(
+        default=True,
+        metadata={
+            "help": "Create the teacher model as an empty shell, and only materialize its parameters when the pretrained weights are loaded. "
+            "Significantly benefits loading time and RAM consumption."
+        },
+    )
+    initialization_strategy: Optional[str] = field(
+        default="maximally_spaced",
+        metadata={
+            "help": "The weight initialization strategy for the decoder weights. Either `first_n`, or `maximally_spaced`."
+        },
+    )
+
+
+def main():
+    # 1. Parse input arguments
+    parser = HfArgumentParser(ModelArguments)
+    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
+        # If we pass only one argument to the script and it's the path to a json file,
+        # let's parse it to get our arguments.
+        model_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))[0]
+    else:
+        model_args = parser.parse_args_into_dataclasses()[0]
+
+    logger.info(f"Model parameters {model_args}")
+
+    logger.info("*** Load pretrained teacher model ***")
+    torch_dtype = (
+        model_args.torch_dtype if model_args.torch_dtype in ["auto", None] else getattr(torch, model_args.torch_dtype)
+    )
+    # quantization_config = get_quantization_config(model_args)
+
+    teacher_model = AutoModelForCausalLM.from_pretrained(
+        model_args.model_name_or_path,
+        torch_dtype=torch_dtype,
+        low_cpu_mem_usage=model_args.low_cpu_mem_usage,
+        revision=model_args.model_revision,
+        cache_dir=model_args.cache_dir,
+        subfolder=model_args.subfolder,
+        trust_remote_code=model_args.trust_remote_code,
+        token=model_args.token,
+        # device_map=get_kbit_device_map() if quantization_config is not None else None,
+        # quantization_config=quantization_config,
+    )
+    tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path)
+    generation_config = teacher_model.generation_config
+    teacher_config = teacher_model.config
+
+    logger.info("*** Teacher model loaded! ***")
+
+    student_config = copy.deepcopy(teacher_config)
+    student_config.num_hidden_layers = model_args.num_hidden_layers
+    teacher_hidden_layers = teacher_config.num_hidden_layers
+
+    if model_args.initialization_strategy == "maximally_spaced":
+        decoder_mapping = np.linspace(0, teacher_hidden_layers - 1, student_config.num_hidden_layers, dtype=int)
+    elif model_args.initialization_strategy == "first_n":
+        decoder_mapping = np.arange(0, student_config.num_hidden_layers)
+    else:
+        raise ValueError(
+            f"Got invalid initialization_strategy '{model_args.initialization_strategy}', should be one of "
+            "`maximally_spaced` or `first_n`."
+        )
+    # always use the last teacher layer as the last student layer
+    decoder_mapping[-1] = teacher_hidden_layers - 1
+
+    decoder_map = {}
+    for student_layer, teacher_layer in enumerate(decoder_mapping):
+        decoder_map[teacher_layer] = student_layer
+
+    # init the student params from the teacher model
+    logger.info("*** Load and initialise student model ***")
+    student_model = AutoModelForCausalLM.from_config(student_config)
+    missing_keys, unexpected_keys = student_model.load_state_dict(teacher_model.state_dict(), strict=False)
+    student_model.to(dtype=torch_dtype)
+    if len(missing_keys) > 0:
+        raise RuntimeError(
+            f"Error(s) in loading state_dict for {student_model.__class__.__name__}. \n"
+            f"Missing key(s) in state_dict: {missing_keys}"
+        )
+    if student_config.num_hidden_layers == teacher_hidden_layers:
+        decoder_keys = [key for key in unexpected_keys if "model.layers" in key]
+        if len(decoder_keys) > 0:
+            raise RuntimeError(
+                f"Error(s) in loading state_dict for {student_model.__class__.__name__}. \n"
+                f"Unexpected key(s) in state_dict: {decoder_keys}"
+            )
+
+    for layer in range(teacher_hidden_layers):
+        if layer in decoder_map:
+            # re-introduce pre-defined layers from the teacher
+            student_model.model.layers[decoder_map[layer]].load_state_dict(
+                teacher_model.model.layers[layer].state_dict()
+            )
+
+    logger.info("*** Student model loaded! ***")
+
+    # remove the teacher params and model
+    del teacher_model
+
+    # save the converted weights and model
+    if model_args.output_dir is not None:
+        student_model.save_pretrained(model_args.output_dir)
+        # we also need to correctly save the processor and generation config
+        tokenizer.save_pretrained(model_args.output_dir)
+        generation_config.save_pretrained(model_args.output_dir)
+
+    if model_args.push_to_hub:
+        if model_args.hub_model_id is None:
+            repo_name = get_full_repo_name(
+                Path(model_args.output_dir).absolute().name,
+                token=model_args.token,
+            )
+        else:
+            repo_name = model_args.hub_model_id
+        create_repo(repo_name, exist_ok=True, token=model_args.token)
+        upload_folder(
+            repo_id=repo_name,
+            folder_path=model_args.output_dir,
+            commit_description="Uploading initialised weights and configs",
+        )
+
+
+if __name__ == "__main__":
+    main()
+
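run_initialization.py is identical to run_initialisation.py above; run.sh calls this spelling. Once either copy has run, the resulting student can be sanity-checked with a short sketch (not part of the commit, assuming the checkpoint in this repo and enough RAM for the 6.3 GB of float32 weights):

# Sketch: load the initialised student, confirm it matches the config,
# and run a tiny generation smoke test.
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained("./")
tokenizer = AutoTokenizer.from_pretrained("./")

assert model.config.num_hidden_layers == 6
inputs = tokenizer("Hello", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=5)
print(tokenizer.decode(outputs[0]))  # output is poor until the student is distilled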
special_tokens_map.json
ADDED
@@ -0,0 +1,23 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.
tokenizer.model
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+size 493443
tokenizer_config.json
ADDED
@@ -0,0 +1,43 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "additional_special_tokens": [],
+  "bos_token": "<s>",
+  "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "</s>",
+  "legacy": true,
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": null,
+  "sp_model_kwargs": {},
+  "spaces_between_special_tokens": false,
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": "<unk>",
+  "use_default_system_prompt": false
+}
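The chat_template above is the Mistral-Instruct format: [INST] ... [/INST] around user turns, assistant turns terminated with the EOS token, and an exception raised if roles do not alternate. A sketch (not part of the commit) of how it renders, assuming the tokenizer files in this commit are available locally:

# Sketch: render a conversation with the chat template above.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./")
messages = [
    {"role": "user", "content": "Hi"},
    {"role": "assistant", "content": "Hello!"},
    {"role": "user", "content": "How are you?"},
]
print(tokenizer.apply_chat_template(messages, tokenize=False))
# <s>[INST] Hi [/INST]Hello!</s>[INST] How are you? [/INST]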