Upload folder using huggingface_hub
- README.md +2 -2
- config.json +32 -0
- configuration_zhinao.py +92 -0
- generation_config.json +14 -0
- generation_utils.py +186 -0
- latest +1 -0
- modeling_zhinao.py +1097 -0
- pytorch_model.bin +2 -2
- rng_state_0.pth +3 -0
- rng_state_2.pth +3 -0
- rng_state_3.pth +3 -0
- rng_state_4.pth +3 -0
- rng_state_5.pth +3 -0
- rng_state_7.pth +3 -0
- special_tokens_map.json +3 -0
- tokenization_zhinao.py +279 -0
- tokenizer_config.json +17 -0
- training_args.bin +3 -0
- vocab/360.tiktoken +0 -0
- zero_to_fp32.py +592 -0
README.md
CHANGED
@@ -70,7 +70,7 @@ We have validated the performance of our model on the [mteb-chinese-reranking le
 
 | Model | T2Reranking | MMarcoReranking | CMedQAv1 | CMedQAv2 | Avg |
 |:-------------------------------|:--------:|:--------:|:--------:|:--------:|:--------:|
-| **360Zhinao-1_8B-
+| **360Zhinao-1_8B-Reranking** | **68.55** | **37.29** | **86.75** | **87.92** | **70.13** |
 | piccolo-large-zh-v2 | 67.15 | 33.39 | 90.14 | 89.31 | 70 |
 | Baichuan-text-embedding | 67.85 | 34.3 | 88.46 | 88.06 | 69.67 |
 | stella-mrl-large-zh-v3.5-1792d | 66.43 | 28.85 | 89.18 | 89.33 | 68.45 |
@@ -274,7 +274,7 @@ class FlagRerankerCustom:
 
 
 if __name__ == "__main__":
-    model_name_or_path = "360Zhinao-1_8B-
+    model_name_or_path = "360Zhinao-1_8B-Reranking"
     model = FlagRerankerCustom(model_name_or_path, use_fp16=False)
     inputs=[["What Color Is the Sky","Blue"], ["What Color Is the Sky","Pink"],]
     ret = model.compute_score(inputs)
config.json
CHANGED
@@ -0,0 +1,32 @@
+{
+  "_name_or_path": "/home/jovyan/testdata/zhangmengyu/gpt/checkpoints/zhinao2bbase_cmedqahn10rand10_2e5_18epoch_best",
+  "architectures": [
+    "ZhinaoForCausalLM"
+  ],
+  "auto_map": {
+    "AutoConfig": "configuration_zhinao.ZhinaoConfig",
+    "AutoModelForCausalLM": "modeling_zhinao.ZhinaoForCausalLM"
+  },
+  "bf16": true,
+  "flash-attn_version": "2.5.5",
+  "fp16": false,
+  "hidden_act": "silu",
+  "hidden_size": 2048,
+  "initializer_range": 0.01,
+  "intermediate_size": 5632,
+  "max_position_embeddings": 4096,
+  "model_max_length": 4096,
+  "model_type": "zhinao",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "num_key_value_heads": 16,
+  "rms_norm_eps": 1e-05,
+  "rope_scaling": null,
+  "rope_theta": 10000.0,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.34.0",
+  "use_cache": false,
+  "use_flash_attn": true,
+  "vocab_size": 158464
+}
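Note: a minimal sketch of how a checkpoint carrying the `auto_map` above is typically loaded. The local path is hypothetical, and `trust_remote_code=True` is what makes `transformers` import `configuration_zhinao.py` / `modeling_zhinao.py` from this commit:

```python
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

path = "./360Zhinao-1_8B-Reranking"  # hypothetical local clone of this repo
config = AutoConfig.from_pretrained(path, trust_remote_code=True)            # resolves to ZhinaoConfig
model = AutoModelForCausalLM.from_pretrained(path, trust_remote_code=True)   # resolves to ZhinaoForCausalLM
tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
```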
configuration_zhinao.py
CHANGED
@@ -0,0 +1,92 @@
+# Copyright (c) 360zhinao and the HuggingFace Inc. team. All rights reserved.
+# This code is built upon Huggingface's transformers repository.
+
+
+from transformers.configuration_utils import PretrainedConfig
+from transformers.utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class ZhinaoConfig(PretrainedConfig):
+
+    model_type = "zhinao"
+    keys_to_ignore_at_inference = ["past_key_values"]
+
+    def __init__(
+        self,
+        vocab_size=32000,
+        hidden_size=4096,
+        intermediate_size=11008,
+        num_hidden_layers=32,
+        num_attention_heads=32,
+        num_key_value_heads=None,
+        hidden_act="silu",
+        max_position_embeddings=2048,
+        initializer_range=0.02,
+        rms_norm_eps=1e-6,
+        use_cache=True,
+        pad_token_id=None,
+        bos_token_id=None,
+        eos_token_id=None,
+        tie_word_embeddings=False,
+        rope_theta=10000.0,
+        rope_scaling=None,
+        bf16=False,
+        fp16=False,
+        use_flash_attn="auto",
+        **kwargs,
+    ):
+        self.vocab_size = vocab_size
+        self.max_position_embeddings = max_position_embeddings
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+
+        # for backward compatibility
+        if num_key_value_heads is None:
+            num_key_value_heads = num_attention_heads
+
+        self.num_key_value_heads = num_key_value_heads
+        self.hidden_act = hidden_act
+        self.initializer_range = initializer_range
+        self.rms_norm_eps = rms_norm_eps
+        self.use_cache = use_cache
+        self.rope_theta = rope_theta
+        self.rope_scaling = rope_scaling
+        self._rope_scaling_validation()
+
+        self.bf16 = bf16
+        self.fp16 = fp16
+        self.use_flash_attn = use_flash_attn
+
+        super().__init__(
+            pad_token_id=pad_token_id,
+            bos_token_id=bos_token_id,
+            eos_token_id=eos_token_id,
+            tie_word_embeddings=tie_word_embeddings,
+            **kwargs,
+        )
+
+    def _rope_scaling_validation(self):
+        """
+        Validate the `rope_scaling` configuration.
+        """
+        if self.rope_scaling is None:
+            return
+
+        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
+            raise ValueError(
+                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
+                f"got {self.rope_scaling}"
+            )
+        rope_scaling_type = self.rope_scaling.get("type", None)
+        rope_scaling_factor = self.rope_scaling.get("factor", None)
+        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic", "ntk"]:
+            raise ValueError(
+                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic', 'ntk'], got {rope_scaling_type}"
+            )
+        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
+            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
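As a quick illustration of the `_rope_scaling_validation` logic above (a sketch with arbitrary values; assumes `configuration_zhinao.py` is importable from the working directory): `rope_scaling` must be `None` or a two-field dict whose `type` is one of `linear`, `dynamic`, `ntk` and whose `factor` is a float strictly greater than 1.

```python
from configuration_zhinao import ZhinaoConfig

cfg = ZhinaoConfig(rope_scaling={"type": "dynamic", "factor": 2.0})  # valid: passes validation

try:
    ZhinaoConfig(rope_scaling={"type": "dynamic", "factor": 1})      # invalid: int, not a float > 1
except ValueError as err:
    print(err)
```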
generation_config.json
CHANGED
@@ -0,0 +1,14 @@
+{
+  "_from_model_config": true,
+  "do_sample": true,
+  "eos_token_id": [
+    158326,
+    158332,
+    158333
+  ],
+  "max_new_tokens": 1024,
+  "pad_token_id": 158326,
+  "top_k": 0,
+  "top_p": 0.8,
+  "transformers_version": "4.34.0"
+}
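Since `top_k` is 0, `transformers` adds no top-k filter, so decoding is governed by nucleus sampling alone (`top_p=0.8`), and any of the three `eos_token_id`s stops generation. A small sketch of reading these defaults back (hypothetical local path):

```python
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("./360Zhinao-1_8B-Reranking")  # hypothetical path
print(gen_cfg.do_sample, gen_cfg.top_k, gen_cfg.top_p)  # True 0 0.8
```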
generation_utils.py
CHANGED
@@ -0,0 +1,186 @@
+import torch
+import numpy as np
+from queue import Queue
+from typing import Tuple, List, Union, Iterable
+from transformers.utils import logging, add_start_docstrings
+from transformers.generation.logits_process import LogitsProcessor, LOGITS_PROCESSOR_INPUTS_DOCSTRING, LogitsProcessorList
+
+
+def make_context(model, tokenizer,
+                 messages: List[dict],
+                 system: str = "You are a helpful assistant.",
+                 max_new_tokens: int = 0,
+                 ):
+
+    max_new_tokens = max_new_tokens or model.generation_config.max_new_tokens
+    max_input_length = model.config.model_max_length - max_new_tokens
+
+    im_start_id = [tokenizer.im_start_id]
+    im_end_id = [tokenizer.im_end_id]
+    nl_tokens = tokenizer.encode("\n")
+
+    def _tokenize_str(role, content):
+        return tokenizer.encode(role, allowed_special=set()) + nl_tokens + tokenizer.encode(content, allowed_special=set())
+
+    def _parse_messages(messages):
+        system, query, history = "", "", []
+        ## system
+        if messages[0]["role"] == "system":
+            system = messages[0]["content"]
+            messages = messages[1:]
+        ## query
+        assert messages[-1]["role"] == "user"
+        query = messages[-1]["content"]
+        messages = messages[:-1]
+        ## history
+        assert len(messages) % 2 == 0
+        for i in range(0, len(messages), 2):
+            assert messages[i]["role"] == "user" and messages[i+1]["role"] == "assistant"
+            history.append([messages[i]["content"], messages[i+1]["content"]])
+
+        return system, query, history
+
+    _system, query, history = _parse_messages(messages)
+
+    ## system
+    system_text = _system if _system != "" else system
+    system_tokens = []
+    if system_text:
+        system_tokens = im_start_id + _tokenize_str("system", system_text) + im_end_id + nl_tokens
+
+    ## query
+    query_tokens = im_start_id + _tokenize_str("user", query) + im_end_id + nl_tokens
+    ## final assistant
+    final_tokens = im_start_id + tokenizer.encode("assistant", allowed_special=set()) + nl_tokens
+
+    ## max_history_tokens
+    max_history_length = max_input_length - len(system_tokens) - len(query_tokens) - len(final_tokens)
+
+    ## history
+    context_tokens = []
+    for turn_query, turn_response in reversed(history):
+        ## query tokens
+        history_query_tokens = im_start_id + _tokenize_str("user", turn_query) + im_end_id + nl_tokens
+        ## answer tokens
+        history_response_tokens = im_start_id + _tokenize_str("assistant", turn_response) + im_end_id + nl_tokens
+        ## this round tokens
+        next_context_tokens = history_query_tokens + history_response_tokens
+        ## concat
+        current_context_size = len(next_context_tokens) + len(context_tokens)
+        if current_context_size < max_history_length:
+            context_tokens = next_context_tokens + context_tokens
+        else:
+            break
+    input_tokens = system_tokens + context_tokens + query_tokens + final_tokens
+
+    return torch.LongTensor([input_tokens]).to(model.device)
+
+
+class TextIterStreamer:
+    def __init__(self, tokenizer, skip_prompt=False, skip_special_tokens=False):
+        self.tokenizer = tokenizer
+        self.skip_prompt = skip_prompt
+        self.skip_special_tokens = skip_special_tokens
+        self.tokens = []
+        self.text_queue = Queue()
+        self.next_tokens_are_prompt = True
+
+    def put(self, value):
+        if self.skip_prompt and self.next_tokens_are_prompt:
+            self.next_tokens_are_prompt = False
+        else:
+            if len(value.shape) > 1:
+                value = value[0]
+            self.tokens.extend(value.tolist())
+            self.text_queue.put(
+                self.tokenizer.decode(self.tokens, skip_special_tokens=self.skip_special_tokens, errors='ignore'))
+
+    def end(self):
+        self.text_queue.put(None)
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        value = self.text_queue.get()
+        if value is None:
+            raise StopIteration()
+        else:
+            return value
+
+
+class OutputRepetitionPenaltyLogitsProcessor(LogitsProcessor):
+    r"""
+    [`LogitsProcessor`] that penalizes the repetition of previous tokens, distinguishing between prompt tokens and
+    generated (output) tokens. The classic repetition penalty is applied at most once per token over both prompt and
+    output; the frequency and presence penalties follow the OpenAI API definition and apply to output tokens only.
+
+    In the original [paper](https://arxiv.org/pdf/1909.05858.pdf), the authors suggest a repetition penalty of around
+    1.2 to achieve a good balance between truthful generation and lack of repetition. Values above 1.0 penalize
+    repetition more strongly; values between 0.0 and 1.0 reward it.
+
+    Args:
+        input_length (`int`):
+            Length of the prompt; positions up to this index are treated as prompt tokens.
+        presence_penalties (`float`):
+            Flat penalty subtracted from the logit of every token already present in the output. Must be in [-2, 2].
+        frequency_penalties (`float`):
+            Penalty scaled by how many times a token appears in the output. Must be in [-2, 2].
+        repetition_penalties (`float`):
+            Multiplicative repetition penalty over prompt and output tokens. 1.0 means no penalty; must be > 0.
+    """
+
+    def __init__(self, input_length: int,
+                 presence_penalties: float = 1.0,
+                 frequency_penalties: float = 0,
+                 repetition_penalties: float = 0):
+        if not (repetition_penalties > 0):
+            raise ValueError(f"`repetition_penalties` has to be a strictly positive float, but is {repetition_penalties}")
+        if not ((frequency_penalties >= -2) and (frequency_penalties <= 2)):
+            raise ValueError(f"`frequency_penalties` has to be in [-2, 2], but is {frequency_penalties}")
+        if not ((presence_penalties >= -2) and (presence_penalties <= 2)):
+            raise ValueError(f"`presence_penalties` has to be in [-2, 2], but is {presence_penalties}")
+
+        self.repetition_penalties = repetition_penalties
+        self.frequency_penalties = frequency_penalties
+        self.presence_penalties = presence_penalties
+        self.input_length = input_length
+
+    def _get_bin_counts_and_mask(
+        self,
+        tokens: torch.Tensor,
+        vocab_size: int,
+        num_seqs: int,
+    ) -> Tuple[torch.Tensor, torch.Tensor]:
+        # Compute the bin counts for the tokens.
+        # vocab_size + 1 for padding.
+        bin_counts = torch.zeros((num_seqs, vocab_size + 1),
+                                 dtype=torch.long,
+                                 device=tokens.device)
+        bin_counts.scatter_add_(1, tokens, torch.ones_like(tokens))
+        bin_counts = bin_counts[:, :vocab_size]
+        mask = bin_counts > 0
+
+        return bin_counts, mask
+
+    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+    def __call__(self, input_ids: torch.LongTensor, logits: torch.FloatTensor) -> torch.FloatTensor:
+        prompt_tokens_tensor = input_ids[:, :self.input_length+1]
+        output_tokens_tensor = input_ids[:, self.input_length+1:]
+
+        num_seqs, vocab_size = logits.shape
+        _, prompt_mask = self._get_bin_counts_and_mask(
+            prompt_tokens_tensor, vocab_size, num_seqs)
+        output_bin_counts, output_mask = self._get_bin_counts_and_mask(
+            output_tokens_tensor, vocab_size, num_seqs)
+
+        repetition_penalties = torch.Tensor([self.repetition_penalties]).to(logits.device)
+        frequency_penalties = torch.Tensor([self.frequency_penalties]).to(logits.device)
+        presence_penalties = torch.Tensor([self.presence_penalties]).to(logits.device)
+
+        repetition_penalties = repetition_penalties[:, None].repeat(1, vocab_size)
+        repetition_penalties[~(prompt_mask | output_mask)] = 1.0
+        logits = torch.where(logits > 0, logits / repetition_penalties,
+                             logits * repetition_penalties)
+
+        # We follow the definition in OpenAI API.
+        # Refer to https://platform.openai.com/docs/api-reference/parameter-details
+        logits -= frequency_penalties.unsqueeze_(dim=1) * output_bin_counts
+        logits -= presence_penalties.unsqueeze_(dim=1) * output_mask
+
+        return logits
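`make_context` above expects OpenAI-style `messages` (an optional leading `system` turn, alternating `user`/`assistant` pairs, ending on `user`), and `OutputRepetitionPenaltyLogitsProcessor` slots into a standard `LogitsProcessorList`. A hedged sketch of both, assuming `model` and `tokenizer` are an already-loaded Zhinao checkpoint:

```python
from transformers.generation.logits_process import LogitsProcessorList
from generation_utils import make_context, OutputRepetitionPenaltyLogitsProcessor

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What color is the sky?"},
]
input_ids = make_context(model, tokenizer, messages, max_new_tokens=256)

# Frequency/presence penalties hit generated tokens only (OpenAI-style);
# the multiplicative repetition penalty covers prompt + output.
processors = LogitsProcessorList([
    OutputRepetitionPenaltyLogitsProcessor(
        input_length=input_ids.shape[1],
        repetition_penalties=1.1,
        frequency_penalties=0.3,
        presence_penalties=0.3,
    )
])
outputs = model.generate(input_ids, logits_processor=processors)
```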
latest
CHANGED
@@ -0,0 +1 @@
+global_step11000
modeling_zhinao.py
CHANGED
@@ -0,0 +1,1097 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) 360zhinao and the HuggingFace Inc. team. All rights reserved.
|
2 |
+
# This code is built upon Huggingface's transformers repository.
|
3 |
+
|
4 |
+
import math
|
5 |
+
import warnings
|
6 |
+
from threading import Thread
|
7 |
+
from typing import List, Optional, Tuple, Union
|
8 |
+
|
9 |
+
import torch
|
10 |
+
import torch.nn.functional as F
|
11 |
+
import torch.utils.checkpoint
|
12 |
+
from torch import nn
|
13 |
+
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
|
14 |
+
|
15 |
+
from transformers.activations import ACT2FN
|
16 |
+
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
|
17 |
+
from transformers.modeling_utils import PreTrainedModel
|
18 |
+
from transformers.utils import logging
|
19 |
+
from transformers.generation.utils import GenerationConfig
|
20 |
+
from transformers.generation.logits_process import LogitsProcessorList
|
21 |
+
from .configuration_zhinao import ZhinaoConfig
|
22 |
+
from .generation_utils import TextIterStreamer, make_context, OutputRepetitionPenaltyLogitsProcessor
|
23 |
+
|
24 |
+
|
25 |
+
try:
|
26 |
+
from flash_attn import flash_attn_varlen_func
|
27 |
+
from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input
|
28 |
+
except:
|
29 |
+
flash_attn_varlen_func = None
|
30 |
+
index_first_axis, pad_input, unpad_input = None, None, None
|
31 |
+
|
32 |
+
|
33 |
+
logger = logging.get_logger(__name__)
|
34 |
+
|
35 |
+
_CONFIG_FOR_DOC = "ZhinaoConfig"
|
36 |
+
|
37 |
+
|
38 |
+
def _get_unpad_data(attention_mask):
|
39 |
+
seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
|
40 |
+
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
|
41 |
+
max_seqlen_in_batch = seqlens_in_batch.max().item()
|
42 |
+
cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0))
|
43 |
+
return (
|
44 |
+
indices,
|
45 |
+
cu_seqlens,
|
46 |
+
max_seqlen_in_batch,
|
47 |
+
)
|
48 |
+
|
49 |
+
|
50 |
+
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
|
51 |
+
def _make_causal_mask(
|
52 |
+
input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
|
53 |
+
):
|
54 |
+
"""
|
55 |
+
Make causal mask used for bi-directional self-attention.
|
56 |
+
"""
|
57 |
+
bsz, tgt_len = input_ids_shape
|
58 |
+
mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
|
59 |
+
mask_cond = torch.arange(mask.size(-1), device=device)
|
60 |
+
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
|
61 |
+
mask = mask.to(dtype)
|
62 |
+
|
63 |
+
if past_key_values_length > 0:
|
64 |
+
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
|
65 |
+
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
|
66 |
+
|
67 |
+
|
68 |
+
# Copied from transformers.models.bart.modeling_bart._expand_mask
|
69 |
+
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
|
70 |
+
"""
|
71 |
+
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
|
72 |
+
"""
|
73 |
+
bsz, src_len = mask.size()
|
74 |
+
tgt_len = tgt_len if tgt_len is not None else src_len
|
75 |
+
|
76 |
+
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
|
77 |
+
|
78 |
+
inverted_mask = 1.0 - expanded_mask
|
79 |
+
|
80 |
+
return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
|
81 |
+
|
82 |
+
|
83 |
+
class ZhinaoRMSNorm(nn.Module):
|
84 |
+
def __init__(self, hidden_size, eps=1e-6):
|
85 |
+
"""
|
86 |
+
ZhinaoRMSNorm is equivalent to T5LayerNorm
|
87 |
+
"""
|
88 |
+
super().__init__()
|
89 |
+
self.weight = nn.Parameter(torch.ones(hidden_size))
|
90 |
+
self.variance_epsilon = eps
|
91 |
+
|
92 |
+
def forward(self, hidden_states):
|
93 |
+
input_dtype = hidden_states.dtype
|
94 |
+
hidden_states = hidden_states.to(torch.float32)
|
95 |
+
variance = hidden_states.pow(2).mean(-1, keepdim=True)
|
96 |
+
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
|
97 |
+
return self.weight * hidden_states.to(input_dtype)
|
98 |
+
|
99 |
+
|
100 |
+
class ZhinaoRotaryEmbedding(torch.nn.Module):
|
101 |
+
def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
|
102 |
+
super().__init__()
|
103 |
+
|
104 |
+
self.dim = dim
|
105 |
+
self.max_position_embeddings = max_position_embeddings
|
106 |
+
self.base = base
|
107 |
+
inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
|
108 |
+
self.register_buffer("inv_freq", inv_freq, persistent=False)
|
109 |
+
|
110 |
+
# Build here to make `torch.jit.trace` work.
|
111 |
+
self._set_cos_sin_cache(
|
112 |
+
seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
|
113 |
+
)
|
114 |
+
|
115 |
+
def _set_cos_sin_cache(self, seq_len, device, dtype):
|
116 |
+
self.max_seq_len_cached = seq_len
|
117 |
+
t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
|
118 |
+
|
119 |
+
freqs = torch.einsum("i,j->ij", t, self.inv_freq)
|
120 |
+
# Different from paper, but it uses a different permutation in order to obtain the same calculation
|
121 |
+
emb = torch.cat((freqs, freqs), dim=-1)
|
122 |
+
self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False)
|
123 |
+
self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False)
|
124 |
+
|
125 |
+
def forward(self, x, seq_len=None):
|
126 |
+
# x: [bs, num_attention_heads, seq_len, head_size]
|
127 |
+
if seq_len > self.max_seq_len_cached:
|
128 |
+
self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
|
129 |
+
|
130 |
+
return (
|
131 |
+
self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
|
132 |
+
self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
|
133 |
+
)
|
134 |
+
|
135 |
+
|
136 |
+
class ZhinaoLinearScalingRotaryEmbedding(ZhinaoRotaryEmbedding):
|
137 |
+
"""ZhinaoRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
|
138 |
+
|
139 |
+
def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
|
140 |
+
self.scaling_factor = scaling_factor
|
141 |
+
super().__init__(dim, max_position_embeddings, base, device)
|
142 |
+
|
143 |
+
def _set_cos_sin_cache(self, seq_len, device, dtype):
|
144 |
+
self.max_seq_len_cached = seq_len
|
145 |
+
t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
|
146 |
+
t = t / self.scaling_factor
|
147 |
+
|
148 |
+
freqs = torch.einsum("i,j->ij", t, self.inv_freq)
|
149 |
+
# Different from paper, but it uses a different permutation in order to obtain the same calculation
|
150 |
+
emb = torch.cat((freqs, freqs), dim=-1)
|
151 |
+
self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False)
|
152 |
+
self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False)
|
153 |
+
|
154 |
+
|
155 |
+
class ZhinaoDynamicNTKScalingRotaryEmbedding(ZhinaoRotaryEmbedding):
|
156 |
+
"""ZhinaoRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla"""
|
157 |
+
|
158 |
+
def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
|
159 |
+
self.scaling_factor = scaling_factor
|
160 |
+
super().__init__(dim, max_position_embeddings, base, device)
|
161 |
+
|
162 |
+
def _set_cos_sin_cache(self, seq_len, device, dtype):
|
163 |
+
self.max_seq_len_cached = seq_len
|
164 |
+
|
165 |
+
if seq_len > self.max_position_embeddings:
|
166 |
+
base = self.base * (
|
167 |
+
(self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
|
168 |
+
) ** (self.dim / (self.dim - 2))
|
169 |
+
inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
|
170 |
+
self.register_buffer("inv_freq", inv_freq, persistent=False)
|
171 |
+
|
172 |
+
t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
|
173 |
+
|
174 |
+
freqs = torch.einsum("i,j->ij", t, self.inv_freq)
|
175 |
+
# Different from paper, but it uses a different permutation in order to obtain the same calculation
|
176 |
+
emb = torch.cat((freqs, freqs), dim=-1)
|
177 |
+
self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False)
|
178 |
+
self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False)
|
179 |
+
|
180 |
+
|
181 |
+
class ZhinaoNTKScalingRotaryEmbedding(torch.nn.Module):
|
182 |
+
def __init__(self, dim, max_position_embeddings=2048, base=10000, scaling_factor=100, device=None):
|
183 |
+
super().__init__()
|
184 |
+
|
185 |
+
self.dim = dim
|
186 |
+
self.max_position_embeddings = max_position_embeddings
|
187 |
+
self.base = base * scaling_factor
|
188 |
+
inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
|
189 |
+
self.register_buffer("inv_freq", inv_freq, persistent=False)
|
190 |
+
|
191 |
+
# Build here to make `torch.jit.trace` work.
|
192 |
+
self._set_cos_sin_cache(
|
193 |
+
seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
|
194 |
+
)
|
195 |
+
|
196 |
+
def _set_cos_sin_cache(self, seq_len, device, dtype):
|
197 |
+
self.max_seq_len_cached = seq_len
|
198 |
+
t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
|
199 |
+
freqs = torch.einsum("i,j->ij", t, self.inv_freq)
|
200 |
+
emb = torch.cat((freqs, freqs), dim=-1)
|
201 |
+
self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False)
|
202 |
+
self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False)
|
203 |
+
|
204 |
+
def forward(self, x, seq_len=None):
|
205 |
+
if seq_len > self.max_seq_len_cached:
|
206 |
+
self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
|
207 |
+
|
208 |
+
return (
|
209 |
+
self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
|
210 |
+
self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
|
211 |
+
)
|
212 |
+
|
213 |
+
|
214 |
+
def rotate_half(x):
|
215 |
+
"""Rotates half the hidden dims of the input."""
|
216 |
+
x1 = x[..., : x.shape[-1] // 2]
|
217 |
+
x2 = x[..., x.shape[-1] // 2 :]
|
218 |
+
return torch.cat((-x2, x1), dim=-1)
|
219 |
+
|
220 |
+
|
221 |
+
def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
|
222 |
+
# The first two dimensions of cos and sin are always 1, so we can `squeeze` them.
|
223 |
+
cos = cos.squeeze(1).squeeze(0) # [seq_len, dim]
|
224 |
+
sin = sin.squeeze(1).squeeze(0) # [seq_len, dim]
|
225 |
+
cos = cos[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim]
|
226 |
+
sin = sin[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim]
|
227 |
+
q_embed = (q * cos) + (rotate_half(q) * sin)
|
228 |
+
k_embed = (k * cos) + (rotate_half(k) * sin)
|
229 |
+
return q_embed, k_embed
|
230 |
+
|
231 |
+
|
232 |
+
class ZhinaoMLP(nn.Module):
|
233 |
+
def __init__(self, config):
|
234 |
+
super().__init__()
|
235 |
+
self.config = config
|
236 |
+
self.hidden_size = config.hidden_size
|
237 |
+
self.intermediate_size = config.intermediate_size
|
238 |
+
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
|
239 |
+
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
|
240 |
+
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
|
241 |
+
self.act_fn = ACT2FN[config.hidden_act]
|
242 |
+
|
243 |
+
def forward(self, x):
|
244 |
+
intermediate = self.act_fn(self.gate_proj(x)) * self.up_proj(x)
|
245 |
+
down_proj = self.down_proj(intermediate)
|
246 |
+
return down_proj
|
247 |
+
|
248 |
+
|
249 |
+
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
|
250 |
+
"""
|
251 |
+
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
|
252 |
+
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
|
253 |
+
"""
|
254 |
+
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
|
255 |
+
if n_rep == 1:
|
256 |
+
return hidden_states
|
257 |
+
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
|
258 |
+
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
|
259 |
+
|
260 |
+
|
261 |
+
class ZhinaoAttention(nn.Module):
|
262 |
+
"""Multi-headed attention from 'Attention Is All You Need' paper"""
|
263 |
+
|
264 |
+
def __init__(self, config: ZhinaoConfig):
|
265 |
+
super().__init__()
|
266 |
+
self.config = config
|
267 |
+
self.hidden_size = config.hidden_size
|
268 |
+
self.num_heads = config.num_attention_heads
|
269 |
+
self.head_dim = self.hidden_size // self.num_heads
|
270 |
+
self.num_key_value_heads = config.num_key_value_heads
|
271 |
+
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
|
272 |
+
self.max_position_embeddings = config.max_position_embeddings
|
273 |
+
self.rope_theta = config.rope_theta
|
274 |
+
self.is_causal = True
|
275 |
+
self.dropout = 0.0
|
276 |
+
self.use_flash_attn = config.use_flash_attn
|
277 |
+
|
278 |
+
if (self.head_dim * self.num_heads) != self.hidden_size:
|
279 |
+
raise ValueError(
|
280 |
+
f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
|
281 |
+
f" and `num_heads`: {self.num_heads})."
|
282 |
+
)
|
283 |
+
|
284 |
+
self.qkv_hidden_size = (self.num_heads + 2 * self.num_key_value_heads) * self.head_dim
|
285 |
+
self.qkv_proj = nn.Linear(self.hidden_size, self.qkv_hidden_size, bias=True)
|
286 |
+
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
|
287 |
+
self._init_rope()
|
288 |
+
|
289 |
+
def _init_rope(self):
|
290 |
+
if self.config.rope_scaling is None:
|
291 |
+
self.rotary_emb = ZhinaoRotaryEmbedding(
|
292 |
+
self.head_dim,
|
293 |
+
max_position_embeddings=self.max_position_embeddings,
|
294 |
+
base=self.rope_theta,
|
295 |
+
)
|
296 |
+
else:
|
297 |
+
scaling_type = self.config.rope_scaling["type"]
|
298 |
+
scaling_factor = self.config.rope_scaling["factor"]
|
299 |
+
if scaling_type == "linear":
|
300 |
+
self.rotary_emb = ZhinaoLinearScalingRotaryEmbedding(
|
301 |
+
self.head_dim,
|
302 |
+
max_position_embeddings=self.max_position_embeddings,
|
303 |
+
scaling_factor=scaling_factor,
|
304 |
+
base=self.rope_theta,
|
305 |
+
)
|
306 |
+
elif scaling_type == "dynamic":
|
307 |
+
self.rotary_emb = ZhinaoDynamicNTKScalingRotaryEmbedding(
|
308 |
+
self.head_dim,
|
309 |
+
max_position_embeddings=self.max_position_embeddings,
|
310 |
+
scaling_factor=scaling_factor,
|
311 |
+
base=self.rope_theta,
|
312 |
+
)
|
313 |
+
elif scaling_type == "ntk":
|
314 |
+
self.rotary_emb = ZhinaoNTKScalingRotaryEmbedding(
|
315 |
+
self.head_dim,
|
316 |
+
max_position_embeddings=self.max_position_embeddings,
|
317 |
+
scaling_factor=scaling_factor,
|
318 |
+
base=self.rope_theta,
|
319 |
+
)
|
320 |
+
else:
|
321 |
+
raise ValueError(f"Unknown RoPE scaling type {scaling_type}")
|
322 |
+
|
323 |
+
def raw_attention(self, query_states, key_states, value_states, attention_mask):
|
324 |
+
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
|
325 |
+
|
326 |
+
if attention_mask is not None:
|
327 |
+
attn_weights = attn_weights + attention_mask
|
328 |
+
|
329 |
+
# upcast attention to fp32
|
330 |
+
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
|
331 |
+
attn_output = torch.matmul(attn_weights, value_states)
|
332 |
+
|
333 |
+
attn_output = attn_output.transpose(1, 2).contiguous()
|
334 |
+
|
335 |
+
return attn_output
|
336 |
+
|
337 |
+
def flash_attention(self, query_states, key_states, value_states, attention_mask):
|
338 |
+
# TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
|
339 |
+
# to be able to avoid many of these transpose/reshape/view.
|
340 |
+
query_states = query_states.transpose(1, 2)
|
341 |
+
key_states = key_states.transpose(1, 2)
|
342 |
+
value_states = value_states.transpose(1, 2)
|
343 |
+
|
344 |
+
batch_size, query_length = query_states.shape[0], query_states.shape[1]
|
345 |
+
query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
|
346 |
+
query_states, key_states, value_states, attention_mask, query_length
|
347 |
+
)
|
348 |
+
|
349 |
+
cu_seqlens_q, cu_seqlens_k = cu_seq_lens
|
350 |
+
max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
|
351 |
+
|
352 |
+
attn_output_unpad = flash_attn_varlen_func(
|
353 |
+
query_states,
|
354 |
+
key_states,
|
355 |
+
value_states,
|
356 |
+
cu_seqlens_q=cu_seqlens_q,
|
357 |
+
cu_seqlens_k=cu_seqlens_k,
|
358 |
+
max_seqlen_q=max_seqlen_in_batch_q,
|
359 |
+
max_seqlen_k=max_seqlen_in_batch_k,
|
360 |
+
dropout_p=self.dropout,
|
361 |
+
softmax_scale=None,
|
362 |
+
causal=self.is_causal,
|
363 |
+
)
|
364 |
+
attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
|
365 |
+
return attn_output
|
366 |
+
|
367 |
+
def forward(
|
368 |
+
self,
|
369 |
+
hidden_states: torch.Tensor,
|
370 |
+
attention_mask: Optional[torch.Tensor] = None,
|
371 |
+
position_ids: Optional[torch.LongTensor] = None,
|
372 |
+
past_key_value: Optional[Tuple[torch.Tensor]] = None,
|
373 |
+
output_attentions: bool = False,
|
374 |
+
use_cache: bool = False,
|
375 |
+
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
376 |
+
bsz, q_len, _ = hidden_states.size()
|
377 |
+
|
378 |
+
mixed_x_layer = self.qkv_proj(hidden_states)
|
379 |
+
new_tensor_shape = mixed_x_layer.size()[:-1] + \
|
380 |
+
(self.num_key_value_heads, ((self.num_heads // self.num_key_value_heads + 2) * self.head_dim))
|
381 |
+
mixed_x_layer = mixed_x_layer.view(*new_tensor_shape)
|
382 |
+
query, key_states, value_states = torch.split(
|
383 |
+
mixed_x_layer,
|
384 |
+
[self.num_heads // self.num_key_value_heads * self.head_dim, self.head_dim, self.head_dim],
|
385 |
+
dim=3
|
386 |
+
)
|
387 |
+
# [sq, b, ng, np/ng * hn] -> [sq, b, np, hn]
|
388 |
+
query_states = query.contiguous().view(query.size(0), query.size(1), -1, self.head_dim)
|
389 |
+
|
390 |
+
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
|
391 |
+
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
392 |
+
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
393 |
+
|
394 |
+
kv_seq_len = key_states.shape[-2]
|
395 |
+
if past_key_value is not None:
|
396 |
+
kv_seq_len += past_key_value[0].shape[-2]
|
397 |
+
cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
|
398 |
+
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
|
399 |
+
|
400 |
+
if past_key_value is not None:
|
401 |
+
# reuse k, v, self_attention
|
402 |
+
key_states = torch.cat([past_key_value[0], key_states], dim=2)
|
403 |
+
value_states = torch.cat([past_key_value[1], value_states], dim=2)
|
404 |
+
|
405 |
+
past_key_value = (key_states, value_states) if use_cache else None
|
406 |
+
|
407 |
+
# repeat k/v heads if n_kv_heads < n_heads
|
408 |
+
key_states = repeat_kv(key_states, self.num_key_value_groups)
|
409 |
+
value_states = repeat_kv(value_states, self.num_key_value_groups)
|
410 |
+
|
411 |
+
# q, k, v: [b, n, s, h]
|
412 |
+
# check attention mask
|
413 |
+
if self.use_flash_attn:
|
414 |
+
if attention_mask is not None and attention_mask.size() != (bsz, kv_seq_len):
|
415 |
+
raise ValueError(f"Attention mask should be of size {(bsz, kv_seq_len)}, but is {attention_mask.size()}")
|
416 |
+
attn_output = self.flash_attention(query_states, key_states, value_states, attention_mask)
|
417 |
+
else:
|
418 |
+
if attention_mask is not None and attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
|
419 |
+
raise ValueError(f"Attention mask should be of size {bsz, 1, q_len, kv_seq_len}, but is {attention_mask.size()}")
|
420 |
+
attn_output = self.raw_attention(query_states, key_states, value_states, attention_mask)
|
421 |
+
|
422 |
+
attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
|
423 |
+
attn_output = self.o_proj(attn_output)
|
424 |
+
|
425 |
+
if not output_attentions:
|
426 |
+
attn_weights = None
|
427 |
+
|
428 |
+
return attn_output, attn_weights, past_key_value
|
429 |
+
|
430 |
+
# Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2._upad_input
|
431 |
+
def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
|
432 |
+
batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape
|
433 |
+
|
434 |
+
# On the first iteration we need to properly re-create the padding mask
|
435 |
+
# by slicing it on the proper place
|
436 |
+
if kv_seq_len != attention_mask.shape[-1]:
|
437 |
+
attention_mask_num_tokens = attention_mask.shape[-1]
|
438 |
+
attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :]
|
439 |
+
|
440 |
+
indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
|
441 |
+
|
442 |
+
key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
|
443 |
+
value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
|
444 |
+
|
445 |
+
if query_length == kv_seq_len:
|
446 |
+
query_layer = index_first_axis(
|
447 |
+
query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k
|
448 |
+
)
|
449 |
+
cu_seqlens_q = cu_seqlens_k
|
450 |
+
max_seqlen_in_batch_q = max_seqlen_in_batch_k
|
451 |
+
indices_q = indices_k
|
452 |
+
elif query_length == 1:
|
453 |
+
max_seqlen_in_batch_q = 1
|
454 |
+
cu_seqlens_q = torch.arange(
|
455 |
+
batch_size + 1, dtype=torch.int32, device=query_layer.device
|
456 |
+
) # There is a memcpy here, that is very bad.
|
457 |
+
indices_q = cu_seqlens_q[:-1]
|
458 |
+
query_layer = query_layer.squeeze(1)
|
459 |
+
else:
|
460 |
+
# The -q_len: slice assumes left padding.
|
461 |
+
attention_mask = attention_mask[:, -query_length:]
|
462 |
+
|
463 |
+
query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
|
464 |
+
return (
|
465 |
+
query_layer,
|
466 |
+
key_layer,
|
467 |
+
value_layer,
|
468 |
+
indices_q,
|
469 |
+
(cu_seqlens_q, cu_seqlens_k),
|
470 |
+
(max_seqlen_in_batch_q, max_seqlen_in_batch_k),
|
471 |
+
)
|
472 |
+
|
473 |
+
|
474 |
+
class ZhinaoDecoderLayer(nn.Module):
|
475 |
+
def __init__(self, config: ZhinaoConfig):
|
476 |
+
super().__init__()
|
477 |
+
self.hidden_size = config.hidden_size
|
478 |
+
|
479 |
+
self.self_attn = ZhinaoAttention(config=config)
|
480 |
+
self.mlp = ZhinaoMLP(config)
|
481 |
+
self.input_layernorm = ZhinaoRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
482 |
+
self.post_attention_layernorm = ZhinaoRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
483 |
+
|
484 |
+
def forward(
|
485 |
+
self,
|
486 |
+
hidden_states: torch.Tensor,
|
487 |
+
attention_mask: Optional[torch.Tensor] = None,
|
488 |
+
position_ids: Optional[torch.LongTensor] = None,
|
489 |
+
past_key_value: Optional[Tuple[torch.Tensor]] = None,
|
490 |
+
output_attentions: Optional[bool] = False,
|
491 |
+
use_cache: Optional[bool] = False,
|
492 |
+
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
|
493 |
+
"""
|
494 |
+
Args:
|
495 |
+
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
|
496 |
+
attention_mask (`torch.FloatTensor`, *optional*):
|
497 |
+
attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
|
498 |
+
query_sequence_length, key_sequence_length)` if default attention is used.
|
499 |
+
output_attentions (`bool`, *optional*):
|
500 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
|
501 |
+
returned tensors for more detail.
|
502 |
+
use_cache (`bool`, *optional*):
|
503 |
+
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
|
504 |
+
(see `past_key_values`).
|
505 |
+
past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
|
506 |
+
"""
|
507 |
+
|
508 |
+
residual = hidden_states
|
509 |
+
|
510 |
+
hidden_states = self.input_layernorm(hidden_states)
|
511 |
+
|
512 |
+
# Self Attention
|
513 |
+
hidden_states, self_attn_weights, present_key_value = self.self_attn(
|
514 |
+
hidden_states=hidden_states,
|
515 |
+
attention_mask=attention_mask,
|
516 |
+
position_ids=position_ids,
|
517 |
+
past_key_value=past_key_value,
|
518 |
+
output_attentions=output_attentions,
|
519 |
+
use_cache=use_cache,
|
520 |
+
)
|
521 |
+
hidden_states = residual + hidden_states
|
522 |
+
|
523 |
+
# Fully Connected
|
524 |
+
residual = hidden_states
|
525 |
+
hidden_states = self.post_attention_layernorm(hidden_states)
|
526 |
+
hidden_states = self.mlp(hidden_states)
|
527 |
+
hidden_states = residual + hidden_states
|
528 |
+
|
529 |
+
outputs = (hidden_states,)
|
530 |
+
|
531 |
+
if output_attentions:
|
532 |
+
outputs += (self_attn_weights,)
|
533 |
+
|
534 |
+
if use_cache:
|
535 |
+
outputs += (present_key_value,)
|
536 |
+
|
537 |
+
return outputs
|
538 |
+
|
539 |
+
|
540 |
+
class ZhinaoPreTrainedModel(PreTrainedModel):
|
541 |
+
config_class = ZhinaoConfig
|
542 |
+
base_model_prefix = "model"
|
543 |
+
supports_gradient_checkpointing = True
|
544 |
+
_no_split_modules = ["ZhinaoDecoderLayer"]
|
545 |
+
_skip_keys_device_placement = "past_key_values"
|
546 |
+
|
547 |
+
def _init_weights(self, module):
|
548 |
+
std = self.config.initializer_range
|
549 |
+
if isinstance(module, nn.Linear):
|
550 |
+
module.weight.data.normal_(mean=0.0, std=std)
|
551 |
+
if module.bias is not None:
|
552 |
+
module.bias.data.zero_()
|
553 |
+
elif isinstance(module, nn.Embedding):
|
554 |
+
module.weight.data.normal_(mean=0.0, std=std)
|
555 |
+
if module.padding_idx is not None:
|
556 |
+
module.weight.data[module.padding_idx].zero_()
|
557 |
+
|
558 |
+
def _set_gradient_checkpointing(self, module, value=False):
|
559 |
+
if isinstance(module, ZhinaoModel):
|
560 |
+
module.gradient_checkpointing = value
|
561 |
+
|
562 |
+
|
563 |
+
class ZhinaoModel(ZhinaoPreTrainedModel):
|
564 |
+
"""
|
565 |
+
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`ZhinaoDecoderLayer`]
|
566 |
+
|
567 |
+
Args:
|
568 |
+
config: ZhinaoConfig
|
569 |
+
"""
|
570 |
+
|
571 |
+
def __init__(self, config: ZhinaoConfig):
|
572 |
+
super().__init__(config)
|
573 |
+
self.padding_idx = config.pad_token_id
|
574 |
+
self.vocab_size = config.vocab_size
|
575 |
+
self.use_flash_attn = config.use_flash_attn
|
576 |
+
|
577 |
+
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
|
578 |
+
self.layers = nn.ModuleList([ZhinaoDecoderLayer(config) for _ in range(config.num_hidden_layers)])
|
579 |
+
self.norm = ZhinaoRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
580 |
+
|
581 |
+
self.gradient_checkpointing = False
|
582 |
+
# Initialize weights and apply final processing
|
583 |
+
self.post_init()
|
584 |
+
|
585 |
+
def get_input_embeddings(self):
|
586 |
+
return self.embed_tokens
|
587 |
+
|
588 |
+
def set_input_embeddings(self, value):
|
589 |
+
self.embed_tokens = value
|
590 |
+
|
591 |
+
# Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
|
592 |
+
def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
|
593 |
+
# create causal mask
|
594 |
+
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
|
595 |
+
combined_attention_mask = None
|
596 |
+
if input_shape[-1] > 1:
|
597 |
+
combined_attention_mask = _make_causal_mask(
|
598 |
+
input_shape,
|
599 |
+
inputs_embeds.dtype,
|
600 |
+
device=inputs_embeds.device,
|
601 |
+
past_key_values_length=past_key_values_length,
|
602 |
+
)
|
603 |
+
|
604 |
+
if attention_mask is not None:
|
605 |
+
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
|
606 |
+
expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
|
607 |
+
inputs_embeds.device
|
608 |
+
)
|
609 |
+
combined_attention_mask = (
|
610 |
+
expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
|
611 |
+
)
|
612 |
+
|
613 |
+
return combined_attention_mask
|
614 |
+
|
615 |
+
def forward(
|
616 |
+
self,
|
617 |
+
input_ids: torch.LongTensor = None,
|
618 |
+
attention_mask: Optional[torch.Tensor] = None,
|
619 |
+
position_ids: Optional[torch.LongTensor] = None,
|
620 |
+
past_key_values: Optional[List[torch.FloatTensor]] = None,
|
621 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
622 |
+
use_cache: Optional[bool] = None,
|
623 |
+
output_attentions: Optional[bool] = None,
|
624 |
+
output_hidden_states: Optional[bool] = None,
|
625 |
+
return_dict: Optional[bool] = None,
|
626 |
+
) -> Union[Tuple, BaseModelOutputWithPast]:
|
627 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
628 |
+
output_hidden_states = (
|
629 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
630 |
+
)
|
631 |
+
use_cache = use_cache if use_cache is not None else self.config.use_cache
|
632 |
+
|
633 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
634 |
+
|
635 |
+
# retrieve input_ids and inputs_embeds
|
636 |
+
if input_ids is not None and inputs_embeds is not None:
|
637 |
+
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
|
638 |
+
elif input_ids is not None:
|
639 |
+
batch_size, seq_length = input_ids.shape
|
640 |
+
elif inputs_embeds is not None:
|
641 |
+
batch_size, seq_length, _ = inputs_embeds.shape
|
642 |
+
else:
|
643 |
+
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
|
644 |
+
|
645 |
+
seq_length_with_past = seq_length
|
646 |
+
past_key_values_length = 0
|
647 |
+
|
648 |
+
if past_key_values is not None:
|
649 |
+
past_key_values_length = past_key_values[0][0].shape[2]
|
650 |
+
seq_length_with_past = seq_length_with_past + past_key_values_length
|
651 |
+
|
652 |
+
if position_ids is None:
|
653 |
+
device = input_ids.device if input_ids is not None else inputs_embeds.device
|
654 |
+
position_ids = torch.arange(
|
655 |
+
past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
|
656 |
+
)
|
657 |
+
position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
|
658 |
+
else:
|
659 |
+
position_ids = position_ids.view(-1, seq_length).long()
|
660 |
+
|
661 |
+
if inputs_embeds is None:
|
662 |
+
inputs_embeds = self.embed_tokens(input_ids)
|
663 |
+
# embed positions
|
664 |
+
if attention_mask is None:
|
665 |
+
attention_mask = torch.ones(
|
666 |
+
(batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
|
667 |
+
)
|
668 |
+
|
669 |
+
# (batch_size, 1, seq_length, seq_length)` if default attention is used
|
670 |
+
if not self.use_flash_attn:
|
671 |
+
attention_mask = self._prepare_decoder_attention_mask(
|
672 |
+
attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
|
673 |
+
)
|
674 |
+
|
675 |
+
hidden_states = inputs_embeds
|
676 |
+
|
677 |
+
if self.gradient_checkpointing and self.training:
|
678 |
+
if use_cache:
|
679 |
+
logger.warning_once(
|
680 |
+
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
|
681 |
+
)
|
682 |
+
use_cache = False
|
683 |
+
|
684 |
+
# decoder layers
|
685 |
+
all_hidden_states = () if output_hidden_states else None
|
686 |
+
all_self_attns = () if output_attentions else None
|
687 |
+
next_decoder_cache = () if use_cache else None
|
688 |
+
|
689 |
+
for idx, decoder_layer in enumerate(self.layers):
|
690 |
+
if output_hidden_states:
|
691 |
+
all_hidden_states += (hidden_states,)
|
692 |
+
|
693 |
+
past_key_value = past_key_values[idx] if past_key_values is not None else None
|
694 |
+
|
695 |
+
if self.gradient_checkpointing and self.training:
|
696 |
+
|
697 |
+
def create_custom_forward(module):
|
698 |
+
def custom_forward(*inputs):
|
699 |
+
# None for past_key_value
|
700 |
+
return module(*inputs, past_key_value, output_attentions)
|
701 |
+
|
702 |
+
return custom_forward
|
703 |
+
|
704 |
+
layer_outputs = torch.utils.checkpoint.checkpoint(
|
705 |
+
create_custom_forward(decoder_layer),
|
706 |
+
hidden_states,
|
707 |
+
attention_mask,
|
708 |
+
position_ids,
|
709 |
+
)
|
710 |
+
else:
|
711 |
+
layer_outputs = decoder_layer(
|
712 |
+
hidden_states,
|
713 |
+
attention_mask=attention_mask,
|
714 |
+
position_ids=position_ids,
|
715 |
+
past_key_value=past_key_value,
|
716 |
+
output_attentions=output_attentions,
|
717 |
+
use_cache=use_cache,
|
718 |
+
)
|
719 |
+
|
720 |
+
hidden_states = layer_outputs[0]
|
721 |
+
|
722 |
+
if use_cache:
|
723 |
+
next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
|
724 |
+
|
725 |
+
if output_attentions:
|
726 |
+
all_self_attns += (layer_outputs[1],)
|
727 |
+
|
728 |
+
hidden_states = self.norm(hidden_states)
|
729 |
+
|
730 |
+
# add hidden states from the last decoder layer
|
731 |
+
if output_hidden_states:
|
732 |
+
all_hidden_states += (hidden_states,)
|
733 |
+
|
734 |
+
next_cache = next_decoder_cache if use_cache else None
|
735 |
+
if not return_dict:
|
736 |
+
return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
|
737 |
+
|
738 |
+
return BaseModelOutputWithPast(
|
739 |
+
last_hidden_state=hidden_states,
|
740 |
+
past_key_values=next_cache,
|
741 |
+
hidden_states=all_hidden_states,
|
742 |
+
attentions=all_self_attns,
|
743 |
+
)
|
744 |
+
|
745 |
+
|
746 |
+
class ZhinaoForCausalLM(ZhinaoPreTrainedModel):
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = ZhinaoModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        # single-output regression head used for reranking scores
        self.linear = nn.Linear(config.hidden_size, 1)

        # Initialize weights and apply final processing
        if config.bf16:
            self.model.bfloat16()
            self.lm_head.bfloat16()
            self.linear.bfloat16()
        if config.fp16:
            self.model.half()
            self.lm_head.half()
            self.linear.half()

        if config.use_flash_attn == "auto":
            if flash_attn_varlen_func:
                if config.bf16 or config.fp16:
                    logger.warning("Try importing flash-attention.")
                    config.use_flash_attn = True
                else:
                    config.use_flash_attn = False
                    logger.warning("Flash attention will be disabled because it does NOT support fp32.")
            else:
                config.use_flash_attn = False
                logger.warning("Please install FlashAttention first, e.g., with pip install flash-attn")

        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        input_ids = input_ids.view(-1, input_ids.size()[-1])
        attention_mask = attention_mask.view(-1, attention_mask.size()[-1])
        labels = labels.view(-1, labels.size()[-1])

        # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]
        # logits = self.lm_head(hidden_states)

        # # warn: huge GPU memory
        # logits = logits.float()
        #
        # loss = None
        # if labels is not None:
        #     # Shift so that tokens < n predict n
        #     shift_logits = logits[..., :-1, :].contiguous()
        #     shift_labels = labels[..., 1:].contiguous()
        #     # Flatten the tokens
        #     loss_fct = CrossEntropyLoss()
        #     shift_logits = shift_logits.view(-1, self.config.vocab_size)
        #     shift_labels = shift_labels.view(-1)
        #     # Enable model parallelism
        #     shift_labels = shift_labels.to(shift_logits.device)
        #     loss = loss_fct(shift_logits, shift_labels)
        last_hidden_state = hidden_states
        batch_size, max_seq_len, hidden_size = last_hidden_state.shape
        input_attention_mask = attention_mask.clone()
        non_pad_mask = input_attention_mask.sum(dim=1) - 1  # right padding; subtract 1 because indices are 0-based
        list_range = list(range(input_attention_mask.shape[0]))
        # mask out the trailing chat-template tokens so pooling only covers the pair text
        input_attention_mask[list_range, non_pad_mask] = False      ## 198 \n
        input_attention_mask[list_range, non_pad_mask - 1] = False  ## 151645 <|im_end|>
        input_attention_mask[list_range, non_pad_mask - 2] = False  ## label
        input_attention_mask[list_range, non_pad_mask - 3] = False  ## 198 \n
        input_attention_mask[list_range, non_pad_mask - 4] = False  ## 77091 assistant
        input_attention_mask[list_range, non_pad_mask - 5] = False  ## 151644 <|im_start|>
        ## recover the integer label from its digit token
        labels_int = labels[list_range, non_pad_mask - 2] - 15  # the digits 0-4 map to token ids 15, 16, 17, 18, 19
        # print(f"{labels[list_range, non_pad_mask]} {labels[list_range, non_pad_mask-1]} {labels[list_range, non_pad_mask-2]} {labels[list_range, non_pad_mask-3]} {labels[list_range, non_pad_mask-4]} {labels[list_range, non_pad_mask-5]}")
        sent_embedding = self.pooling(last_hidden_state, input_attention_mask, "mean")  ## mean or last
        sent_embedding = F.normalize(sent_embedding, p=2, dim=1)  # L2 norm; batch_size * hidden_size
        out_score = self.linear(sent_embedding).to(torch.float32)  ## batch_size * 1
        loss_func = torch.nn.MSELoss(reduction="mean")
        labels_int = labels_int.view(batch_size, -1).to(torch.float32)
        out_score = out_score.view(batch_size, -1)
        labels_int = labels_int.view(batch_size, -1) * 20 - 10  # scale label k to 20k - 10, e.g. 0 -> -10, 1 -> +10
        loss = loss_func(out_score, labels_int).to(torch.float32)
        lm_logits = out_score

        if not return_dict:
            output = (lm_logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=lm_logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def pooling(self, last_hidden_state, attention_mask, pooling_method="mean"):
        if pooling_method == "last":
            non_pad_mask = attention_mask.sum(dim=1) - 1  # right padding; subtract 1 because indices are 0-based
            embedding = last_hidden_state[range(last_hidden_state.shape[0]), non_pad_mask]
            return embedding
        elif pooling_method == "mean":
            s = torch.sum(last_hidden_state * attention_mask.unsqueeze(-1), dim=1)
            d = attention_mask.sum(dim=1, keepdim=True)
            return s / d
        else:
            raise ValueError("Pooling method value illegal!")

    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
    ):
        if past_key_values:
            input_ids = input_ids[:, -1:]

        position_ids = kwargs.get("position_ids", None)
        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -1].unsqueeze(-1)

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs.update(
            {
                "position_ids": position_ids,
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "attention_mask": attention_mask,
            }
        )
        return model_inputs

    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
            )
        return reordered_past

    def generate(
        self,
        inputs: Optional[torch.Tensor] = None,
        generation_config: Optional[GenerationConfig] = None,
        streamer=None,
        **kwargs,
    ):
        repetition_penalty = kwargs.pop("repetition_penalty", generation_config.repetition_penalty)
        generation_config.repetition_penalty = 1.0

        logits_processor = None
        if repetition_penalty > 1.0:
            warnings.warn(
                "We highly recommend using OpenAI's frequency and presence penalty instead of the original "
                "repetition penalty. The original repetition penalty penalizes prompt tokens, which may lead "
                "to various potential issues. Therefore, your repetition penalty coefficient will be "
                "transformed into frequency penalty and presence penalty.", UserWarning)
            presence_penalty = repetition_penalty - 1.0
            frequency_penalty = repetition_penalty - 1.0
            logits_processor = LogitsProcessorList(
                [OutputRepetitionPenaltyLogitsProcessor(inputs.size(1), presence_penalty, frequency_penalty, 1.0)]
            )

        response = super().generate(
            inputs,
            generation_config=generation_config,
            logits_processor=logits_processor,
            streamer=streamer,
            **kwargs,
        )
        generation_config.repetition_penalty = repetition_penalty
        return response

    def chat(
        self,
        tokenizer,
        messages: List[dict],
        system: str = "You are a helpful assistant.",
        stream=False,
        generation_config: Optional[GenerationConfig] = None):

        generation_config = generation_config or self.generation_config
        input_ids = make_context(
            model=self, tokenizer=tokenizer, messages=messages,
            system=system, max_new_tokens=generation_config.max_new_tokens
        )

        if stream:
            streamer = TextIterStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
            Thread(target=self.generate, kwargs=dict(
                inputs=input_ids, streamer=streamer,
                generation_config=generation_config,
            )).start()
            return streamer
        else:
            outputs = self.generate(input_ids, generation_config=generation_config)
            response = tokenizer.decode(outputs[0][len(input_ids[0]):], skip_special_tokens=True)
            return response


class ZhinaoForSequenceClassification(ZhinaoPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = ZhinaoModel(config)
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SequenceClassifierOutputWithPast]:

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            sequence_lengths = -1
        else:
            if input_ids is not None:
                sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).long().argmax(-1) - 1).to(
                    logits.device
                )
            else:
                sequence_lengths = -1

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]

        loss = None
        if labels is not None:
            labels = labels.to(logits.device)
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(pooled_logits, labels)
        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
pytorch_model.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:633b9fe5d1024d306777843f465188b1875561ed96ff3cc9253e12c312c1f38e
+size 3764951134
rng_state_0.pth
CHANGED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:79a96f7090940d37874086998bddc52ea4620134bd07fd8a63da4c1b2a998080
size 15920

rng_state_2.pth
CHANGED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b0666ffafc2c5dfd369aa7e773cf83f121cc324e732bd49f7a9fa17410a64902
size 15920

rng_state_3.pth
CHANGED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b6f449dd085fdfc9debd709a93d2a07dc7826cb5f39e158fe6203980298f6f03
size 15920

rng_state_4.pth
CHANGED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:847425f478a003ce68ee5b96e9dfcaddeba4f7b548b95dba31210f238102df4a
size 15920

rng_state_5.pth
CHANGED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4934e7e3be93abd4f43c9e4acefd715c1e48bdd6b4be8b4154286f1b36f4182e
size 15920

rng_state_7.pth
CHANGED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c12dcfaa5614e36e51e49c6fa5c16c3dbc8f841a60ada5749cd7fd4626404216
size 15920
special_tokens_map.json
CHANGED
@@ -0,0 +1,3 @@
{
  "pad_token": "<eod>"
}
tokenization_zhinao.py
CHANGED
@@ -0,0 +1,279 @@
import os
import torch
import base64
import tiktoken
from typing import Collection, Optional, Dict, List, Set, Tuple, Union
from transformers.utils import PaddingStrategy
from transformers.tokenization_utils import PreTrainedTokenizer


PAT_STR = r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+"""


class SPTokenizer:
    def __init__(self, model_path):
        self.vocab_file = model_path
        self.pad_token = '<pad>'
        self.unk_token = '<unk>'
        self.mask_token = '<mask>'
        self.eod_token = '<eod>'
        self.eop_token = '<eop>'
        self.im_start_token = '<|im_start|>'
        self.im_end_token = '<|im_end|>'

        ## special_tokens
        self.SPECIAL_TOKENS = (
            self.pad_token,
            self.unk_token,
            self.mask_token,
            self.eod_token,
            self.eop_token,
            '[space2]', '[space3]', '[space4]', '[space8]',
            self.im_start_token, self.im_end_token
        )
        self.build_tokenizer()
        self.out = self.output_core_token()

        # each [spaceN] token stands for a run of N literal spaces
        self.token2strs = {
            "[space2]": "  ",
            "[space3]": "   ",
            "[space4]": "    ",
            "[space8]": "        ",
        }
        self.str2tokens = {v: k for k, v in self.token2strs.items()}
        self.sorted_strs = sorted(list(self.str2tokens.keys()),
                                  key=lambda x: len(x), reverse=True)

        ## skip_special_tokens
        self.decode_skip_special_tokens = [
            self.pad_token,
            self.unk_token,
            self.mask_token,
            self.eod_token,
            self.eop_token,
            self.im_start_token,
            self.im_end_token]
        self.decode_skip_special_tokens_ids = [self.convert_token_to_id(token) for token in self.decode_skip_special_tokens]

    def _load_tiktoken_bpe(self, tiktoken_bpe_file: str):
        with open(tiktoken_bpe_file, "rb") as f:
            contents = f.read()
        return {
            base64.b64decode(token): int(rank)
            for token, rank in (line.split() for line in contents.splitlines() if line)
        }

    def build_tokenizer(self):
        mergeable_ranks = self._load_tiktoken_bpe(self.vocab_file)
        special_tokens = {
            token: index
            for index, token in enumerate(
                self.SPECIAL_TOKENS, start=len(mergeable_ranks)
            )
        }
        encode = tiktoken.Encoding(
            "zhinao",
            pat_str=PAT_STR,
            mergeable_ranks=mergeable_ranks,
            special_tokens=special_tokens
        )
        decoder = {v: k for k, v in mergeable_ranks.items()}
        decoder.update({v: k for k, v in special_tokens.items()})
        decoder_token2id = {v: k for k, v in decoder.items()}

        self.tokenizer = encode
        self.decoder = decoder
        self.decoder_token2id = decoder_token2id
        self.num_tokens = len(mergeable_ranks) + len(self.SPECIAL_TOKENS)

    def output_core_token(self):
        """output special tokens"""
        out = {}
        for t in self.SPECIAL_TOKENS:
            out[t] = self.convert_token_to_id(t)
        return out

    def tokenize(
        self,
        text,
        allowed_special: Union[Set, str] = "all",
        disallowed_special: Union[Collection, str] = ()):
        tokens = []
        text = self.convert(text)
        for idx in self.tokenizer.encode(text, allowed_special=allowed_special, disallowed_special=disallowed_special):
            tokens.append(self.decoder[idx])
        return tokens

    def encode(self, text, allowed_special="all", disallowed_special=()):
        """text to id"""
        text = self.convert(text)
        return self.tokenizer.encode(text, allowed_special=allowed_special, disallowed_special=disallowed_special)

    def decode(self, ids, errors="replace"):
        """id to text"""
        text = self.tokenizer.decode(ids, errors=errors)
        return self.deconvert(text)

    def decode_tokens(self, tokens: List[str]) -> str:
        """
        Converts a sequence of tokens into a single string.
        """
        text = ""
        temp = b""
        for t in tokens:
            if isinstance(t, str):
                if temp:
                    text += temp.decode("utf-8", errors="replace")
                    temp = b""
                text += t
            elif isinstance(t, bytes):
                temp += t
            else:
                raise TypeError("token should only be of type bytes or str")
        if temp:
            text += temp.decode("utf-8", errors="replace")
        return self.deconvert(text)

    def convert_id_to_token(self, idx):
        return self.decoder[idx]

    def convert_token_to_id(self, token):
        return self.decoder_token2id[token]

    def convert(self, text):
        """Convert special characters in the text into their special tokens."""
        for k in ["[br]", "<br>"]:
            text = text.replace(k, "\n")
        for k in self.sorted_strs:
            if k in text:
                text = text.replace(k, self.str2tokens[k])
        return text

    def deconvert(self, text):
        """Restore the original characters in decoded text."""
        for t in self.token2strs:
            if t in text:
                text = text.replace(t, self.token2strs[t])
        return text


class ZhinaoTokenizer(PreTrainedTokenizer):
    vocab_files_names = {"vocab_file": "vocab/360.tiktoken"}
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, padding_side="left", clean_up_tokenization_spaces=False, **kwargs):
        self.name = "ZhinaoTokenizer"
        self.errors = "replace"
        self.vocab_file = vocab_file
        self.tokenizer = SPTokenizer(model_path=vocab_file)
        # these tokens are fixed by the vocab, so drop any user-supplied overrides
        for key in ('eos_token', 'pad_token', 'unk_token'):
            kwargs.pop(key, None)
        super().__init__(padding_side=padding_side, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs)
        self.pad_token_id = self.tokenizer.convert_token_to_id(self.tokenizer.pad_token)
        self.eod_id = self.tokenizer.convert_token_to_id(self.tokenizer.eod_token)
        self.im_start_id = self.tokenizer.convert_token_to_id(self.tokenizer.im_start_token)
        self.im_end_id = self.tokenizer.convert_token_to_id(self.tokenizer.im_end_token)
        # debug print of the key special-token ids
        from icecream import ic
        ic(
            self.eos_token_id,
            self.pad_token_id,
            self.im_start_id,
            self.im_end_id)

    @property
    def unk_token(self) -> str:
        return self.tokenizer.unk_token

    @property
    def pad_token(self) -> str:
        return self.tokenizer.pad_token

    @property
    def eos_token(self) -> str:
        return self.tokenizer.eod_token

    @property
    def eos_token_id(self):
        return self.tokenizer.convert_token_to_id(self.tokenizer.eod_token)

    @property
    def eop_token(self) -> str:
        return self.tokenizer.eop_token

    @property
    def eop_token_id(self):
        return self.tokenizer.convert_token_to_id(self.tokenizer.eop_token)

    @property
    def vocab_size(self):
        return self.tokenizer.num_tokens

    def get_vocab(self):
        """Returns vocab as a dict."""
        vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def tokenize(
        self,
        text: str,
        allowed_special: Union[Set, str] = "all",
        disallowed_special: Union[Collection, str] = (),
    ) -> List[Union[bytes, str]]:
        tokens = []
        for t in self.tokenizer.encode(
            text, allowed_special=allowed_special, disallowed_special=disallowed_special
        ):
            tokens.append(self.tokenizer.decoder[t])
        return tokens

    def _decode(
        self,
        token_ids: Union[int, List[int]],
        skip_special_tokens: bool = False,
        errors: str = None,
        **kwargs,
    ) -> str:
        if isinstance(token_ids, int):
            token_ids = [token_ids]
        if skip_special_tokens:
            token_ids = [i for i in token_ids if i not in self.tokenizer.decode_skip_special_tokens_ids]
        return self.tokenizer.decode(token_ids, errors=errors or self.errors)

    def _tokenize(self, text, **kwargs):
        raise NotImplementedError

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        return self.tokenizer.convert_token_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        return self.tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """
        Converts a sequence of tokens into a single string.
        """
        return self.tokenizer.decode_tokens(tokens)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save only the vocabulary of the tokenizer."""
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(save_directory, self.vocab_files_names["vocab_file"])
        else:
            vocab_file = save_directory

        with open(self.vocab_file, 'rb') as fin:
            proto_str = fin.read()

        os.makedirs(save_directory + "/vocab", exist_ok=True)
        with open(vocab_file, "wb") as writer:
            writer.write(proto_str)

        return (vocab_file,)
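
Note: a minimal loading sketch for the tokenizer added above; the local path is illustrative, and trust_remote_code=True is needed so the auto_map in tokenizer_config.json can resolve ZhinaoTokenizer:

from transformers import AutoTokenizer

# "." assumes a local checkout containing this commit's files
tokenizer = AutoTokenizer.from_pretrained(".", trust_remote_code=True)
ids = tokenizer.encode("hello world")
print(ids)
print(tokenizer.decode(ids, skip_special_tokens=True))
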
tokenizer_config.json
CHANGED
@@ -0,0 +1,17 @@
{
  "added_tokens_decoder": {},
  "additional_special_tokens": [],
  "auto_map": {
    "AutoTokenizer": [
      "tokenization_zhinao.ZhinaoTokenizer",
      null
    ]
  },
  "clean_up_tokenization_spaces": false,
  "do_lower_case": false,
  "model_max_length": 1024,
  "padding_side": "right",
  "remove_space": false,
  "tokenizer_class": "ZhinaoTokenizer",
  "tokenizer_file": null
}
training_args.bin
CHANGED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4d5b02d1dbdf756f53047e8bf38c28b2e82cf5bb7162ae1f47e87c5f1c366109
size 7288
vocab/360.tiktoken
ADDED
The diff for this file is too large to render. See raw diff.
zero_to_fp32.py
CHANGED
@@ -0,0 +1,592 @@
#!/usr/bin/env python

# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

# This script extracts fp32 consolidated weights from zero 1, 2 and 3 DeepSpeed checkpoints. It gets
# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
# application.
#
# example: python zero_to_fp32.py . pytorch_model.bin

import argparse
import torch
import glob
import math
import os
import re
from collections import OrderedDict
from dataclasses import dataclass

# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
# DeepSpeed data structures it has to be available in the current python environment.
from deepspeed.utils import logger
from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
                                            FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
                                            FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)


@dataclass
class zero_model_state:
    buffers: dict()
    param_shapes: dict()
    shared_params: list
    ds_version: int
    frozen_param_shapes: dict()
    frozen_param_fragments: dict()


debug = 0

# load to cpu
device = torch.device('cpu')


def atoi(text):
    return int(text) if text.isdigit() else text


def natural_keys(text):
    '''
    alist.sort(key=natural_keys) sorts in human order
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    '''
    return [atoi(c) for c in re.split(r'(\d+)', text)]


def get_model_state_file(checkpoint_dir, zero_stage):
    if not os.path.isdir(checkpoint_dir):
        raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")

    # there should be only one file
    if zero_stage <= 2:
        file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
    elif zero_stage == 3:
        file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")

    if not os.path.exists(file):
        raise FileNotFoundError(f"can't find model states file at '{file}'")

    return file


def get_checkpoint_files(checkpoint_dir, glob_pattern):
    # XXX: need to test that this simple glob rule works for multi-node setup too
    ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)

    if len(ckpt_files) == 0:
        raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")

    return ckpt_files


def get_optim_files(checkpoint_dir):
    return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")


def get_model_state_files(checkpoint_dir):
    return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")


def parse_model_states(files):
    zero_model_states = []
    for file in files:
        state_dict = torch.load(file, map_location=device)

        if BUFFER_NAMES not in state_dict:
            raise ValueError(f"{file} is not a model state checkpoint")
        buffer_names = state_dict[BUFFER_NAMES]
        if debug:
            print("Found buffers:", buffer_names)

        # recover just the buffers while restoring them to fp32 if they were saved in fp16
        buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
        param_shapes = state_dict[PARAM_SHAPES]

        # collect parameters that are included in param_shapes
        param_names = []
        for s in param_shapes:
            for name in s.keys():
                param_names.append(name)

        # update with frozen parameters
        frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
        if frozen_param_shapes is not None:
            if debug:
                print(f"Found frozen_param_shapes: {frozen_param_shapes}")
            param_names += list(frozen_param_shapes.keys())

        # handle shared params
        shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]

        ds_version = state_dict.get(DS_VERSION, None)

        frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)

        z_model_state = zero_model_state(buffers=buffers,
                                         param_shapes=param_shapes,
                                         shared_params=shared_params,
                                         ds_version=ds_version,
                                         frozen_param_shapes=frozen_param_shapes,
                                         frozen_param_fragments=frozen_param_fragments)
        zero_model_states.append(z_model_state)

    return zero_model_states


def parse_optim_states(files, ds_checkpoint_dir):

    total_files = len(files)
    state_dicts = []
    for f in files:
        state_dict = torch.load(f, map_location=device)
        # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
        # and also handle the case where it was already removed by another helper script
        state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
        state_dicts.append(state_dict)

    if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
        raise ValueError(f"{files[0]} is not a zero checkpoint")
    zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
    world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]

    # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
    # parameters can be different from data parallelism for non-expert parameters. So we can just
    # use the max of the partition_count to get the dp world_size.

    if type(world_size) is list:
        world_size = max(world_size)

    if world_size != total_files:
        raise ValueError(
            f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
            "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
        )

    # the groups are named differently in each stage
    if zero_stage <= 2:
        fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
    elif zero_stage == 3:
        fp32_groups_key = FP32_FLAT_GROUPS
    else:
        raise ValueError(f"unknown zero stage {zero_stage}")

    if zero_stage <= 2:
        fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
    elif zero_stage == 3:
        # if there is more than one param group, there will be multiple flattened tensors - one
        # flattened tensor per group - for simplicity merge them into a single tensor
        #
        # XXX: could make the script more memory efficient for when there are multiple groups - it
        # will require matching the sub-lists of param_shapes for each param group flattened tensor

        fp32_flat_groups = [
            torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
        ]

    return zero_stage, world_size, fp32_flat_groups


def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir):
    """
    Returns fp32 state_dict reconstructed from ds checkpoint

    Args:
        - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)

    """
    print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")

    optim_files = get_optim_files(ds_checkpoint_dir)
    zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
    print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")

    model_files = get_model_state_files(ds_checkpoint_dir)

    zero_model_states = parse_model_states(model_files)
    print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')

    if zero_stage <= 2:
        return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states)
    elif zero_stage == 3:
        return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states)


def _zero2_merge_frozen_params(state_dict, zero_model_states):
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    frozen_param_fragments = zero_model_states[0].frozen_param_fragments

    if debug:
        num_elem = sum(s.numel() for s in frozen_param_shapes.values())
        print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        state_dict[name] = frozen_param_fragments[name]

        if debug:
            print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")


def _has_callable(obj, fn):
    attr = getattr(obj, fn, None)
    return callable(attr)


def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    param_shapes = zero_model_states[0].param_shapes

    # Reconstruction protocol:
    #
    # XXX: document this

    if debug:
        for i in range(world_size):
            for j in range(len(fp32_flat_groups[0])):
                print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")

    # XXX: memory usage doubles here (zero2)
    num_param_groups = len(fp32_flat_groups[0])
    merged_single_partition_of_fp32_groups = []
    for i in range(num_param_groups):
        merged_partitions = [sd[i] for sd in fp32_flat_groups]
        full_single_fp32_vector = torch.cat(merged_partitions, 0)
        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
    avail_numel = sum(
        [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])

    if debug:
        wanted_params = sum([len(shapes) for shapes in param_shapes])
        wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
        # not asserting if there is a mismatch due to possible padding
        print(f"Have {avail_numel} numels to process.")
        print(f"Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    total_numel = 0
    total_params = 0
    for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
        offset = 0
        avail_numel = full_single_fp32_vector.numel()
        for name, shape in shapes.items():

            unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
            total_numel += unpartitioned_numel
            total_params += 1

            if debug:
                print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
            offset += unpartitioned_numel

        # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
        # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
        # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
        # live optimizer object, so we are checking that the numbers are within the right range
        align_to = 2 * world_size

        def zero2_align(x):
            return align_to * math.ceil(x / align_to)

        if debug:
            print(f"original offset={offset}, avail_numel={avail_numel}")

        offset = zero2_align(offset)
        avail_numel = zero2_align(avail_numel)

        if debug:
            print(f"aligned offset={offset}, avail_numel={avail_numel}")

        # Sanity check
        if offset != avail_numel:
            raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")


def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states):
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    _zero2_merge_frozen_params(state_dict, zero_model_states)

    _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict


def zero3_partitioned_param_info(unpartitioned_numel, world_size):
    remainder = unpartitioned_numel % world_size
    padding_numel = (world_size - remainder) if remainder else 0
    partitioned_numel = math.ceil(unpartitioned_numel / world_size)
    return partitioned_numel, padding_numel


def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    if debug:
        for i in range(world_size):
            num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
            print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in zero_model_states[0].frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
        state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")


def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    param_shapes = zero_model_states[0].param_shapes
    avail_numel = fp32_flat_groups[0].numel() * world_size
    # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
    # param, re-consolidating each param, while dealing with padding if any

    # merge list of dicts, preserving order
    param_shapes = {k: v for d in param_shapes for k, v in d.items()}

    if debug:
        for i in range(world_size):
            print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")

        wanted_params = len(param_shapes)
        wanted_numel = sum(shape.numel() for shape in param_shapes.values())
        # not asserting if there is a mismatch due to possible padding
        avail_numel = fp32_flat_groups[0].numel() * world_size
        print(f"Trainable params: Have {avail_numel} numels to process.")
        print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    offset = 0
    total_numel = 0
    total_params = 0
    for name, shape in param_shapes.items():

        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel
        total_params += 1

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

        # XXX: memory usage doubles here
        state_dict[name] = torch.cat(
            tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
            0).narrow(0, 0, unpartitioned_numel).view(shape)
        offset += partitioned_numel

    offset *= world_size

    # Sanity check
    if offset != avail_numel:
        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")


def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states):
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)

    _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict


def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
    ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
    via a model hub.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``

    Returns:
        - pytorch ``state_dict``

    Note: this approach may not work if your application doesn't have sufficient free CPU memory and
    you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
    the checkpoint.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        # do the training and checkpoint saving
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
        model = model.cpu() # move to cpu
        model.load_state_dict(state_dict)
        # submit to model hub or save the model to share with others

    In this example the ``model`` will no longer be usable in the deepspeed context of the same
    application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.

    """
    if tag is None:
        latest_path = os.path.join(checkpoint_dir, 'latest')
        if os.path.isfile(latest_path):
            with open(latest_path, 'r') as fd:
                tag = fd.read().strip()
        else:
            raise ValueError(f"Unable to find 'latest' file at {latest_path}")

    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)

    if not os.path.isdir(ds_checkpoint_dir):
        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")

    return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)


def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
    loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
    """

    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
    print(f"Saving fp32 state dict to {output_file}")
    torch.save(state_dict, output_file)


def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
    """
    1. Put the provided model to cpu
    2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
    3. Load it into the provided model

    Args:
        - ``model``: the model object to update
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``

    Returns:
        - ``model``: modified model

    Make sure you have plenty of CPU memory available before you call this function. If you don't
    have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
    conveniently placed for you in the checkpoint folder.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
        # submit to model hub or save the model to share with others

    Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
    of the same application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    """
    logger.info("Extracting fp32 weights")
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)

    logger.info("Overwriting model with fp32 weights")
    model = model.cpu()
    model.load_state_dict(state_dict, strict=False)

    return model


if __name__ == "__main__":

    parser = argparse.ArgumentParser()
    parser.add_argument("checkpoint_dir",
                        type=str,
                        help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
    parser.add_argument(
        "output_file",
        type=str,
        help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
    parser.add_argument("-t",
                        "--tag",
                        type=str,
                        default=None,
                        help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
    parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
    args = parser.parse_args()

    debug = args.debug

    convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file, tag=args.tag)
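
Note: the script documents its own CLI usage ("python zero_to_fp32.py . pytorch_model.bin"). An equivalent programmatic sketch, with a hypothetical output filename chosen to avoid clobbering the repo's existing pytorch_model.bin:

# a minimal sketch; checkpoint_dir "." assumes you run from the repo root,
# where the `latest` file supplies the checkpoint tag
from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict

convert_zero_checkpoint_to_fp32_state_dict(
    checkpoint_dir=".",
    output_file="pytorch_model_fp32.bin",  # hypothetical output name
)
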