dwzhu committed on
Commit
8d6c59e
1 Parent(s): dc760c9

Upload pose_modeling_baichuan.py with huggingface_hub

Files changed (1)
  1. pose_modeling_baichuan.py +960 -0
pose_modeling_baichuan.py ADDED
@@ -0,0 +1,960 @@
1
+ # Modification Copyright 2023 Dawei Zhu
2
+ # Copyright 2023 Baichuan Inc. All Rights Reserved.
3
+
4
+ # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
5
+ #
6
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
7
+ # and OPT implementations in this library. It has been modified from its
8
+ # original forms to accommodate minor architectural differences compared
9
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
10
+ #
11
+ # Licensed under the Apache License, Version 2.0 (the "License");
12
+ # you may not use this file except in compliance with the License.
13
+ # You may obtain a copy of the License at
14
+ #
15
+ # http://www.apache.org/licenses/LICENSE-2.0
16
+ #
17
+ # Unless required by applicable law or agreed to in writing, software
18
+ # distributed under the License is distributed on an "AS IS" BASIS,
19
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
20
+ # See the License for the specific language governing permissions and
21
+ # limitations under the License.
22
+
23
+
24
+ from my_configuration_baichuan import BaichuanConfig
25
+ # from .generation_utils import build_chat_input, TextIterStreamer
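+ # NOTE: `BaichuanForCausalLM.chat()` below still calls `build_chat_input` and
+ # `TextIterStreamer`; re-enable this import (or provide local equivalents) before using `chat()`.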
26
+
27
+ import math
28
+ from typing import List, Optional, Tuple, Union
29
+ from threading import Thread
30
+
31
+ import numpy as np
32
+ import torch
33
+ import torch.utils.checkpoint
34
+ from torch import nn
35
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
36
+ from torch.nn import functional as F
37
+ from transformers import PreTrainedModel, PretrainedConfig
38
+ from transformers.activations import ACT2FN
39
+ from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
40
+ from transformers.generation.utils import GenerationConfig
41
+ from transformers.utils import logging, ContextManagers
42
+
43
+ import os
44
+ from contextlib import contextmanager
45
+ logger = logging.get_logger(__name__)
46
+
47
+ try:
48
+ from xformers import ops as xops
49
+ except ImportError:
50
+ xops = None
51
+ logger.warning(
52
+ "Xformers is not installed correctly. If you want to use memory_efficient_attention to accelerate training use the following command to install Xformers\npip install xformers."
53
+ )
54
+
55
+
56
+ # Copied from transformers.models.bart.modeling_bart._make_causal_mask
57
+ def _make_causal_mask(
58
+ input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
59
+ ):
60
+ """
61
+ Make causal mask used for bi-directional self-attention.
62
+ """
63
+ bsz, tgt_len = input_ids_shape
64
+ mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
65
+ mask_cond = torch.arange(mask.size(-1), device=device)
66
+ mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
67
+ mask = mask.to(dtype)
68
+
69
+ if past_key_values_length > 0:
70
+ mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
71
+ return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
72
+
73
+ def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
74
+ """
75
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
76
+ """
77
+ if len(mask.size()) == 3:
78
+ bsz, src_len, _ = mask.size()
79
+ tgt_len = tgt_len if tgt_len is not None else src_len
80
+ expanded_mask = mask[:,None,:,:].expand(bsz, 1, tgt_len, src_len).to(dtype)
81
+ else:
82
+ bsz, src_len = mask.size()
83
+ tgt_len = tgt_len if tgt_len is not None else src_len
84
+ expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
85
+
86
+ inverted_mask = 1.0 - expanded_mask
87
+
88
+ return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
89
+
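+ # Both helpers above return additive float masks: 0.0 at positions that may be attended and
+ # torch.finfo(dtype).min at masked positions, so they can be added directly to attention
+ # scores or passed as `attn_mask` to scaled_dot_product_attention.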
90
+
91
+ class RMSNorm(nn.Module):
92
+ def __init__(self, hidden_size, eps=1e-6):
93
+ """
94
+ RMSNorm is equivalent to T5LayerNorm
95
+ """
96
+ super().__init__()
97
+ self.weight = nn.Parameter(torch.ones(hidden_size))
98
+ self.variance_epsilon = eps
99
+
100
+ def forward(self, hidden_states):
101
+ variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
102
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
103
+
104
+ # convert into half-precision if necessary
105
+ if self.weight.dtype in [torch.float16, torch.bfloat16]:
106
+ hidden_states = hidden_states.to(self.weight.dtype)
107
+
108
+ return self.weight * hidden_states
109
+
110
+
111
+ class RotaryEmbedding(torch.nn.Module):
112
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
113
+ super().__init__()
114
+
115
+ self.dim = dim
116
+ self.max_position_embeddings = max_position_embeddings
117
+ self.base = base
118
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
119
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
120
+
121
+ # Build here to make `torch.jit.trace` work.
122
+ self._set_cos_sin_cache(
123
+ seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
124
+ )
125
+
126
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
127
+ self.max_seq_len_cached = seq_len
128
+ t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=torch.float32)
129
+ freqs = torch.outer(t, self.inv_freq)
131
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
132
+ emb = torch.cat((freqs, freqs), dim=-1)
133
+ self.cos_cached = emb.cos()[None, None, :, :].to(torch.float32)
134
+ self.sin_cached = emb.sin()[None, None, :, :].to(torch.float32)
135
+
136
+
137
+ def forward(self, x, seq_len=None):
138
+ # x: [bs, num_attention_heads, seq_len, head_size]
139
+ # This `if` block is unlikely to be run after we build sin/cos in `__init__`. Keep the logic here just in case.
140
+ if seq_len > self.max_seq_len_cached:
141
+ self.max_seq_len_cached = seq_len
142
+ self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
143
+
144
+ elif self.cos_cached.device != x.device:
145
+ self.cos_cached = self.cos_cached.to(x.device)
146
+ self.sin_cached = self.sin_cached.to(x.device)
147
+ return (
148
+ self.cos_cached[:, :, :, ...],
149
+ self.sin_cached[:, :, :, ...],
150
+ )
151
+
152
+ class LinearScalingRotaryEmbedding(RotaryEmbedding):
153
+
154
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
155
+ self.scaling_factor = scaling_factor
156
+ super().__init__(dim, max_position_embeddings, base, device)
157
+
158
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
159
+ self.max_seq_len_cached = seq_len
160
+ t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=torch.float32)
161
+ t = t / self.scaling_factor
162
+ freqs = torch.outer(t, self.inv_freq)
164
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
165
+ emb = torch.cat((freqs, freqs), dim=-1)
166
+ self.cos_cached = emb.cos()[None, None, :, :].to(torch.float32)
167
+ self.sin_cached = emb.sin()[None, None, :, :].to(torch.float32)
168
+
169
+
170
+ class VanillaNTKScalingRotaryEmbedding(RotaryEmbedding):
171
+
172
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
173
+ self.scaling_factor = scaling_factor
174
+ super().__init__(dim, max_position_embeddings, base, device)
175
+
176
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
177
+ self.max_seq_len_cached = seq_len
178
+
179
+ base = self.base * self.scaling_factor ** (self.dim / (self.dim - 2))
180
+ inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
181
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
182
+
183
+ t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=torch.float32)
184
+ freqs = torch.outer(t, self.inv_freq)
186
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
187
+ emb = torch.cat((freqs, freqs), dim=-1)
188
+ self.cos_cached = emb.cos()[None, None, :, :].to(torch.float32)
189
+ self.sin_cached = emb.sin()[None, None, :, :].to(torch.float32)
190
+
191
+ def _yarn_find_correction_dim(num_rotations, dim, base=10000, max_position_embeddings=2048):
192
+ return (dim * math.log(max_position_embeddings/(num_rotations * 2 * math.pi)))/(2 * math.log(base))
193
+
194
+ # Find dim range bounds based on rotations
195
+ def _yarn_find_correction_range(low_rot, high_rot, dim, base=10000, max_position_embeddings=2048):
196
+ low = math.floor(_yarn_find_correction_dim(
197
+ low_rot, dim, base, max_position_embeddings))
198
+ high = math.ceil(_yarn_find_correction_dim(
199
+ high_rot, dim, base, max_position_embeddings))
200
+ return max(low, 0), min(high, dim-1) # Clamp values just in case
201
+
202
+ def _yarn_linear_ramp_mask(min, max, dim):
203
+ if min == max:
204
+ max += 0.001 # Prevent singularity
205
+
206
+ linear_func = (torch.arange(dim, dtype=torch.float32) - min) / (max - min)
207
+ ramp_func = torch.clamp(linear_func, 0, 1)
208
+ return ramp_func
209
+
210
+ def _yarn_get_mscale(scale=1):
211
+ if scale <= 1:
212
+ return 1.0
213
+ return 0.1 * math.log(scale) + 1.0
214
+
215
+
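+ # The helpers above implement the YaRN "NTK-by-parts" pieces used by YaRNScaledRotaryEmbedding
+ # below: a per-dimension correction range, a linear ramp that blends interpolated and
+ # extrapolated inverse frequencies, and the attention magnitude scale
+ # mscale = 0.1 * ln(scale) + 1 for scale > 1.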
216
+ class YaRNScaledRotaryEmbedding(torch.nn.Module):
217
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, scale=1, original_max_position_embeddings=2048, extrapolation_factor=1, attn_factor=1, beta_fast=32, beta_slow=1, finetuned=False, device=None):
218
+ super().__init__()
219
+
220
+ self.dim = dim
221
+ self.max_position_embeddings = max_position_embeddings
222
+ self.base = base
223
+ self.scale = scale
224
+ self.original_max_position_embeddings = original_max_position_embeddings
225
+ self.extrapolation_factor = extrapolation_factor
226
+ self.attn_factor = attn_factor
227
+ self.beta_fast = beta_fast
228
+ self.beta_slow = beta_slow
229
+
230
+ # self.yarn(device)
231
+ self.revised_yarn(device)
232
+
233
+ # Build here to make `torch.jit.trace` work.
234
+ self.max_seq_len_cached = max_position_embeddings
235
+
236
+ t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=torch.float32)
237
+ freqs = torch.outer(t, self.inv_freq.to(device=t.device).to(t.dtype))
238
+ # t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
239
+ # freqs = torch.einsum("i,j->ij", t, self.inv_freq)
240
+
241
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
242
+ emb = torch.cat((freqs, freqs), dim=-1)
243
+ dtype = torch.get_default_dtype()
244
+
245
+ self.register_buffer("cos_cached", (emb.cos() * self.mscale)[None, None, :, :].to(dtype), persistent=False)
246
+ self.register_buffer("sin_cached", (emb.sin() * self.mscale)[None, None, :, :].to(dtype), persistent=False)
247
+
248
+ def forward(self, x, seq_len=None):
249
+ # x: [bs, num_attention_heads, seq_len, head_size]
250
+ # This `if` block is unlikely to be run after we build sin/cos in `__init__`. Keep the logic here just in case.
251
+ if seq_len > self.max_seq_len_cached:
252
+ print("*****notice******")
253
+ self.max_seq_len_cached = seq_len
254
+
255
+ t = torch.arange(self.max_seq_len_cached, device=x.device, dtype=self.inv_freq.dtype)
256
+ freqs = torch.einsum("i,j->ij", t, self.inv_freq)
257
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
258
+ emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
259
+
260
+ self.register_buffer("cos_cached", (emb.cos() * self.mscale)[None, None, :, :].to(x.dtype), persistent=False)
261
+ self.register_buffer("sin_cached", (emb.sin() * self.mscale)[None, None, :, :].to(x.dtype), persistent=False)
262
+ return (
263
+ self.cos_cached[:, :, :, ...].to(dtype=x.dtype),
264
+ self.sin_cached[:, :, :, ...].to(dtype=x.dtype),
265
+ )
266
+
267
+ def yarn(self, device):
268
+ pos_freqs = self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim)
269
+ inv_freq_extrapolation = 1.0 / pos_freqs
270
+ inv_freq_interpolation = 1.0 / (self.scale * pos_freqs)
271
+
272
+ low, high = _yarn_find_correction_range(self.beta_fast, self.beta_slow, self.dim, self.base, self.original_max_position_embeddings)
273
+ inv_freq_mask = (1 - _yarn_linear_ramp_mask(low, high, self.dim // 2).float().to(device)) * self.extrapolation_factor # Get n-d rotational scaling corrected for extrapolation
274
+ inv_freq = inv_freq_interpolation * (1 - inv_freq_mask) + inv_freq_extrapolation * inv_freq_mask
275
+
276
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
277
+ self.mscale = float(_yarn_get_mscale(self.scale) * self.attn_factor) # Get n-d magnitude scaling corrected for interpolation
278
+
279
+ def revised_yarn(self, device):
280
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
281
+
282
+ low, high = _yarn_find_correction_range(self.beta_fast, self.beta_slow, self.dim, self.base, self.original_max_position_embeddings)
283
+ inv_freq_mask = (1 - _yarn_linear_ramp_mask(low, high, self.dim // 2).float().to(device)) * self.extrapolation_factor
284
+
285
+ inv_freq = inv_freq / ((1-inv_freq_mask)*self.scale + inv_freq_mask)
286
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
287
+ self.mscale = float(_yarn_get_mscale(self.scale) * self.attn_factor)
288
+
289
+
290
+ def rotate_half(x):
291
+ """Rotates half the hidden dims of the input."""
292
+ x1 = x[..., : x.shape[-1] // 2]
293
+ x2 = x[..., x.shape[-1] // 2:]
294
+ return torch.cat((-x2, x1), dim=-1)
295
+
296
+
297
+ def apply_rotary_pos_emb(q, k, cos_, sin_, position_ids):
298
+ cos = cos_.squeeze(1).squeeze(0) # [seq_len, dim]
299
+ sin = sin_.squeeze(1).squeeze(0) # [seq_len, dim]
300
+ cos = cos[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim]
301
+ sin = sin[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim]
302
+ q_embed = (q.float() * cos) + (rotate_half(q.float()) * sin)
303
+ k_embed = (k.float() * cos) + (rotate_half(k.float()) * sin)
304
+ return q_embed.to(q.dtype), k_embed.to(k.dtype)
305
+
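+ # apply_rotary_pos_emb gathers cos/sin by position id, applies the rotation in float32
+ # (note the .float() casts above), and casts the result back to the original q/k dtype.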
306
+
307
+ class MLP(nn.Module):
308
+ def __init__(
309
+ self,
310
+ hidden_size: int,
311
+ intermediate_size: int,
312
+ hidden_act: str,
313
+ ):
314
+ super().__init__()
315
+ self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
316
+ self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
317
+ self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
318
+ self.act_fn = ACT2FN[hidden_act]
319
+
320
+ def forward(self, x):
321
+ return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
322
+
323
+
324
+ class Attention(nn.Module):
325
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
326
+ def __init__(self, config: BaichuanConfig):
327
+ super().__init__()
328
+ self.config = config
329
+ self.hidden_size = config.hidden_size
330
+ self.num_heads = config.num_attention_heads
331
+ self.head_dim = self.hidden_size // self.num_heads
332
+ self.max_position_embeddings = config.max_position_embeddings
333
+
334
+ if (self.head_dim * self.num_heads) != self.hidden_size:
335
+ raise ValueError(
336
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
337
+ f" and `num_heads`: {self.num_heads})."
338
+ )
339
+ self.W_pack = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=False)
340
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
341
+ self._init_rope()
342
+ # self.rotary_emb = RotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings)
343
+
344
+ def _init_rope(self):
345
+ if self.config.rope_scaling is None:
346
+ self.rotary_emb = RotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings)
347
+ else:
348
+ scaling_type = self.config.rope_scaling["type"]
349
+ scaling_factor = self.config.rope_scaling["factor"]
350
+ if scaling_type == "linear":
351
+ self.rotary_emb = LinearScalingRotaryEmbedding(
352
+ self.head_dim, max_position_embeddings=self.max_position_embeddings, scaling_factor=scaling_factor
353
+ )
354
+ elif scaling_type == "vanilla_ntk":
355
+ self.rotary_emb = VanillaNTKScalingRotaryEmbedding(
356
+ self.head_dim, max_position_embeddings=self.max_position_embeddings, scaling_factor=scaling_factor
357
+ )
358
+ elif scaling_type == "yarn":
359
+ original_max_position_embeddings = self.config.rope_scaling["original_max_position_embeddings"]
360
+ self.rotary_emb = YaRNScaledRotaryEmbedding(
361
+ self.head_dim, max_position_embeddings=self.max_position_embeddings, scale=scaling_factor, original_max_position_embeddings=original_max_position_embeddings
362
+ )
363
+ else:
364
+ raise ValueError(f"Unknown RoPE scaling type {scaling_type}")
365
+
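+ # Illustrative `config.rope_scaling` values accepted by `_init_rope` above (keys taken from
+ # the code; the numbers are examples, not recommendations):
+ #   {"type": "linear", "factor": 2.0}
+ #   {"type": "vanilla_ntk", "factor": 2.0}
+ #   {"type": "yarn", "factor": 4.0, "original_max_position_embeddings": 4096}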
366
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
367
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
368
+
369
+ def forward(
370
+ self,
371
+ hidden_states: torch.Tensor,
372
+ attention_mask: Optional[torch.Tensor] = None,
373
+ position_ids: Optional[torch.LongTensor] = None,
374
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
375
+ output_attentions: bool = False,
376
+ use_cache: bool = False,
377
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
378
+ bsz, q_len, _ = hidden_states.size()
379
+
380
+ proj = self.W_pack(hidden_states)
381
+ proj = proj.unflatten(-1, (3, self.hidden_size)).unsqueeze(0).transpose(0, -2).squeeze(-2)
382
+ query_states = proj[0].view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
383
+ key_states = proj[1].view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
384
+ value_states = proj[2].view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
385
+
386
+ kv_seq_len = key_states.shape[-2]
387
+ if past_key_value is not None:
388
+ kv_seq_len += past_key_value[0].shape[-2]
389
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
390
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
391
+ # [bsz, nh, t, hd]
392
+
393
+ if past_key_value is not None:
394
+ # reuse k, v, self_attention
395
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
396
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
397
+
398
+ past_key_value = (key_states, value_states) if use_cache else None
399
+ if xops is not None and self.training:
400
+ attn_weights = None
401
+ query_states = query_states.transpose(1, 2)
402
+ key_states = key_states.transpose(1, 2)
403
+ value_states = value_states.transpose(1, 2)
404
+ attn_output = xops.memory_efficient_attention(
405
+ query_states, key_states, value_states, attn_bias=xops.LowerTriangularMask()
406
+ )
407
+ else:
+ attn_weights = None # scaled_dot_product_attention does not expose attention weights
408
+ with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=True, enable_mem_efficient=True):
409
+ attn_output = F.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask = attention_mask)
410
+ attn_output = attn_output.transpose(1, 2)
411
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
412
+ attn_output = self.o_proj(attn_output)
413
+
414
+ if not output_attentions:
415
+ attn_weights = None
416
+
417
+ return attn_output, attn_weights, past_key_value
418
+
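+ # Attention path above: during training with xformers available, memory_efficient_attention
+ # with a LowerTriangularMask is used and attention weights are not materialized; otherwise
+ # torch's scaled_dot_product_attention is called with the prepared additive attention mask.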
419
+
420
+ class DecoderLayer(nn.Module):
421
+ def __init__(self, config: BaichuanConfig):
422
+ super().__init__()
423
+ self.hidden_size = config.hidden_size
424
+ self.self_attn = Attention(config=config)
425
+ self.mlp = MLP(
426
+ hidden_size=self.hidden_size,
427
+ intermediate_size=config.intermediate_size,
428
+ hidden_act=config.hidden_act,
429
+ )
430
+ self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
431
+ self.post_attention_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
432
+
433
+ def forward(
434
+ self,
435
+ hidden_states: torch.Tensor,
436
+ attention_mask: Optional[torch.Tensor] = None,
437
+ position_ids: Optional[torch.LongTensor] = None,
438
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
439
+ output_attentions: Optional[bool] = False,
440
+ use_cache: Optional[bool] = False,
441
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
442
+
443
+ residual = hidden_states
444
+
445
+ hidden_states = self.input_layernorm(hidden_states)
446
+
447
+ # Self Attention
448
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
449
+ hidden_states=hidden_states,
450
+ attention_mask=attention_mask,
451
+ position_ids=position_ids,
452
+ past_key_value=past_key_value,
453
+ output_attentions=output_attentions,
454
+ use_cache=use_cache,
455
+ )
456
+ hidden_states = residual + hidden_states
457
+
458
+ # Fully Connected
459
+ residual = hidden_states
460
+ hidden_states = self.post_attention_layernorm(hidden_states)
461
+ hidden_states = self.mlp(hidden_states)
462
+ hidden_states = residual + hidden_states
463
+
464
+ outputs = (hidden_states,)
465
+
466
+ if output_attentions:
467
+ outputs += (self_attn_weights,)
468
+
469
+ if use_cache:
470
+ outputs += (present_key_value,)
471
+
472
+ return outputs
473
+
474
+
475
+ class BaichuanPreTrainedModel(PreTrainedModel):
476
+ config_class = BaichuanConfig
477
+ base_model_prefix = "model"
478
+ supports_gradient_checkpointing = True
479
+ _no_split_modules = ["DecoderLayer"]
480
+ _keys_to_ignore_on_load_unexpected = [r"decoder\.version"]
481
+
482
+ def _init_weights(self, module):
483
+ std = self.config.initializer_range
484
+ if isinstance(module, nn.Linear):
485
+ module.weight.data.normal_(mean=0.0, std=std)
486
+ if module.bias is not None:
487
+ module.bias.data.zero_()
488
+ elif isinstance(module, nn.Embedding):
489
+ module.weight.data.normal_(mean=0.0, std=std)
490
+ if module.padding_idx is not None:
491
+ module.weight.data[module.padding_idx].zero_()
492
+
493
+ def _set_gradient_checkpointing(self, module, value=False):
494
+ if isinstance(module, BaichuanModel):
495
+ module.gradient_checkpointing = value
496
+
497
+
498
+ class BaichuanModel(BaichuanPreTrainedModel):
499
+ def __init__(self, config: BaichuanConfig):
500
+ super().__init__(config)
501
+ self.padding_idx = config.pad_token_id
502
+ self.vocab_size = config.vocab_size
503
+
504
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
505
+ self.layers = nn.ModuleList([DecoderLayer(config) for _ in range(config.num_hidden_layers)])
506
+ self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
507
+
508
+ self.gradient_checkpointing = False
509
+ # Initialize weights and apply final processing
510
+ self.post_init()
511
+
512
+ def get_input_embeddings(self):
513
+ return self.embed_tokens
514
+
515
+ def set_input_embeddings(self, value):
516
+ self.embed_tokens = value
517
+
518
+ # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
519
+ def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
520
+ # create causal mask
521
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
522
+ combined_attention_mask = None
523
+ if input_shape[-1] > 1:
524
+ combined_attention_mask = _make_causal_mask(
525
+ input_shape,
526
+ inputs_embeds.dtype,
527
+ device=inputs_embeds.device,
528
+ past_key_values_length=past_key_values_length,
529
+ )
530
+
531
+ if attention_mask is not None:
532
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
533
+ expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
534
+ inputs_embeds.device
535
+ )
536
+ combined_attention_mask = (
537
+ expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
538
+ )
539
+
540
+ return combined_attention_mask
541
+
542
+ def forward(
543
+ self,
544
+ input_ids: torch.LongTensor = None,
545
+ attention_mask: Optional[torch.Tensor] = None,
546
+ position_ids: Optional[torch.LongTensor] = None,
547
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
548
+ inputs_embeds: Optional[torch.FloatTensor] = None,
549
+ use_cache: Optional[bool] = None,
550
+ output_attentions: Optional[bool] = None,
551
+ output_hidden_states: Optional[bool] = None,
552
+ return_dict: Optional[bool] = None,
553
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
554
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
555
+ output_hidden_states = (
556
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
557
+ )
558
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
559
+
560
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
561
+
562
+ # retrieve input_ids and inputs_embeds
563
+ if input_ids is not None and inputs_embeds is not None:
564
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
565
+ elif input_ids is not None:
566
+ batch_size, seq_length = input_ids.shape
567
+ elif inputs_embeds is not None:
568
+ batch_size, seq_length, _ = inputs_embeds.shape
569
+ else:
570
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
571
+
572
+ seq_length_with_past = seq_length
573
+ past_key_values_length = 0
574
+
575
+ if past_key_values is not None:
576
+ past_key_values_length = past_key_values[0][0].shape[2]
577
+ seq_length_with_past = seq_length_with_past + past_key_values_length
578
+
579
+ if position_ids is None:
580
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
581
+ position_ids = torch.arange(
582
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
583
+ )
584
+ position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
585
+ else:
586
+ position_ids = position_ids.view(-1, seq_length).long()
587
+
588
+ if inputs_embeds is None:
589
+ inputs_embeds = self.embed_tokens(input_ids)
590
+ # embed positions
591
+ if attention_mask is None:
592
+ attention_mask = torch.ones(
593
+ (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
594
+ )
595
+ attention_mask = self._prepare_decoder_attention_mask(
596
+ attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
597
+ )
598
+
599
+ hidden_states = inputs_embeds
600
+
601
+ if self.gradient_checkpointing and self.training:
602
+ if use_cache:
603
+ logger.warning_once(
604
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
605
+ )
606
+ use_cache = False
607
+
608
+ # decoder layers
609
+ all_hidden_states = () if output_hidden_states else None
610
+ all_self_attns = () if output_attentions else None
611
+ next_decoder_cache = () if use_cache else None
612
+
613
+ for idx, decoder_layer in enumerate(self.layers):
614
+ if output_hidden_states:
615
+ all_hidden_states += (hidden_states,)
616
+
617
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
618
+
619
+ if self.gradient_checkpointing and self.training:
620
+
621
+ def create_custom_forward(module):
622
+ def custom_forward(*inputs):
623
+ # None for past_key_value
624
+ return module(*inputs, output_attentions, None)
625
+
626
+ return custom_forward
627
+
628
+ layer_outputs = torch.utils.checkpoint.checkpoint(
629
+ create_custom_forward(decoder_layer),
630
+ hidden_states,
631
+ attention_mask,
632
+ position_ids,
633
+ None,
634
+ )
635
+ else:
636
+ layer_outputs = decoder_layer(
637
+ hidden_states,
638
+ attention_mask=attention_mask,
639
+ position_ids=position_ids,
640
+ past_key_value=past_key_value,
641
+ output_attentions=output_attentions,
642
+ use_cache=use_cache,
643
+ )
644
+
645
+ hidden_states = layer_outputs[0]
646
+
647
+ if use_cache:
648
+ next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
649
+
650
+ if output_attentions:
651
+ all_self_attns += (layer_outputs[1],)
652
+
653
+ hidden_states = self.norm(hidden_states)
654
+
655
+ # add hidden states from the last decoder layer
656
+ if output_hidden_states:
657
+ all_hidden_states += (hidden_states,)
658
+
659
+ next_cache = next_decoder_cache if use_cache else None
660
+ if not return_dict:
661
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
662
+ return BaseModelOutputWithPast(
663
+ last_hidden_state=hidden_states,
664
+ past_key_values=next_cache,
665
+ hidden_states=all_hidden_states,
666
+ attentions=all_self_attns,
667
+ )
668
+
669
+
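+ # NormHead: an LM head with L2-normalized weight rows. During training the weight is
+ # re-normalized on every forward pass; at inference it is normalized once (guarded by
+ # `first_flag`) and the normalized tensor is written back into the parameter.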
670
+ class NormHead(nn.Module):
671
+ def __init__(self, hidden_size, vocab_size, bias=False):
672
+ super().__init__()
673
+ self.weight = nn.Parameter(torch.empty((vocab_size, hidden_size)))
674
+ nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
675
+ self.first_flag = True
676
+
677
+ def forward(self, hidden_states):
678
+ if self.training:
679
+ norm_weight = nn.functional.normalize(self.weight)
680
+ self.first_flag = True
681
+ elif self.first_flag:
682
+ self.first_flag = False
683
+ self.weight = nn.Parameter(nn.functional.normalize(self.weight))
684
+ norm_weight = self.weight
685
+ else:
686
+ norm_weight = self.weight
687
+ return nn.functional.linear(hidden_states, norm_weight)
688
+
689
+ _init_weights = True
690
+ @contextmanager
691
+ def no_init_weights(_enable=True):
692
+ global _init_weights
693
+ old_init_weights = _init_weights
694
+ if _enable:
695
+ _init_weights = False
696
+ try:
697
+ yield
698
+ finally:
699
+ _init_weights = old_init_weights
700
+
701
+ class BaichuanForCausalLM(BaichuanPreTrainedModel):
702
+ def __init__(self, config, *model_args, **model_kwargs):
703
+ super().__init__(config, *model_args, **model_kwargs)
704
+ self.model = BaichuanModel(config)
705
+
706
+ self.lm_head = NormHead(config.hidden_size, config.vocab_size, bias=False)
707
+ if hasattr(config, "quantization_config") and isinstance(config.quantization_config, dict) and config.quantization_config.get('load_in_4bit', False):
708
+ try:
709
+ from .quantizer import quantize_offline, init_model_weight_int4
710
+ except ImportError:
711
+ raise ImportError(f"Needs QLinear to run quantize.")
712
+ quantize_offline(self, 4)
713
+ # Initialize weights and apply final processing
714
+ self.post_init()
715
+
716
+ def get_input_embeddings(self):
717
+ return self.model.embed_tokens
718
+
719
+ def set_input_embeddings(self, value):
720
+ self.model.embed_tokens = value
721
+
722
+ def get_output_embeddings(self):
723
+ return self.lm_head
724
+
725
+ def set_output_embeddings(self, new_embeddings):
726
+ self.lm_head = new_embeddings
727
+
728
+ def set_decoder(self, decoder):
729
+ self.model = decoder
730
+
731
+ def get_decoder(self):
732
+ return self.model
733
+
734
+ @classmethod
735
+ def from_pretrained(
736
+ cls,
737
+ pretrained_model_name_or_path: Optional[Union[str, os.PathLike]],
738
+ *model_args,
739
+ config: Optional[Union[PretrainedConfig, str, os.PathLike]] = None,
740
+ cache_dir: Optional[Union[str, os.PathLike]] = None,
741
+ ignore_mismatched_sizes: bool = False,
742
+ force_download: bool = False,
743
+ local_files_only: bool = False,
744
+ token: Optional[Union[str, bool]] = None,
745
+ revision: str = "main",
746
+ use_safetensors: Optional[bool] = None,
747
+ **kwargs,
748
+ ):
749
+ # Load config if we don't provide a configuration
750
+ if not isinstance(config, PretrainedConfig):
751
+ config_path = config if config is not None else pretrained_model_name_or_path
752
+ config, model_kwargs = cls.config_class.from_pretrained(
753
+ config_path,
754
+ cache_dir=cache_dir,
755
+ return_unused_kwargs=True,
756
+ force_download=force_download,
757
+ resume_download=False,
758
+ proxies=None,
759
+ local_files_only=local_files_only,
760
+ token=token,
761
+ revision=revision,
762
+ subfolder="",
763
+ _from_auto=False,
764
+ _from_pipeline=None,
765
+ **kwargs,
766
+ )
767
+ else:
768
+ model_kwargs = kwargs
769
+
770
+ if hasattr(config, "quantization_config") and config.quantization_config['load_in_4bit']:
771
+ try:
772
+ from .quantizer import init_model_weight_int4
773
+ from accelerate import init_empty_weights, dispatch_model, infer_auto_device_map
774
+ from accelerate.utils import CustomDtype
775
+ from accelerate.utils import get_balanced_memory
776
+ except ImportError:
777
+ raise ImportError(f"Needs import model weight init func to run quantize.")
778
+ # Instantiate model.
779
+ init_contexts = [no_init_weights(_enable=True)]
780
+ init_contexts.append(init_empty_weights())
781
+ with ContextManagers(init_contexts):
782
+ model = cls(config)
783
+
784
+ model_file = os.path.join(pretrained_model_name_or_path, 'pytorch_model.bin')
785
+ state_dict = torch.load(model_file, map_location="cpu")
786
+ model.is_quantized = True
787
+
788
+ device_map = kwargs.pop("device_map", None)
789
+ torch_dtype = kwargs.pop("torch_dtype", None)
790
+
791
+ if device_map is not None:
792
+ kwargs = {"no_split_module_classes": model._no_split_modules}
793
+ target_dtype = CustomDtype.INT4
794
+ max_memory = get_balanced_memory(
795
+ model,
796
+ dtype=target_dtype,
797
+ low_zero=(device_map == "balanced_low_0"),
798
+ max_memory=None,
799
+ **kwargs,
800
+ )
801
+ kwargs["max_memory"] = max_memory
802
+ device_map = infer_auto_device_map(model, dtype=target_dtype, **kwargs)
803
+
804
+ model = init_model_weight_int4(config, model, state_dict)
805
+
806
+ # Set model in evaluation mode to deactivate DropOut modules by default
807
+ model.eval()
808
+ # If it is a model with generation capabilities, attempt to load the generation config
809
+ if model.can_generate():
810
+ try:
811
+ model.generation_config = GenerationConfig.from_pretrained(
812
+ pretrained_model_name_or_path,
813
+ cache_dir=cache_dir,
814
+ force_download=force_download,
815
+ resume_download=False,
816
+ proxies=None,
817
+ local_files_only=local_files_only,
818
+ token=token,
819
+ revision=revision,
820
+ subfolder="",
821
+ _from_auto=False,
822
+ _from_pipeline=None,
823
+ **kwargs,
824
+ )
825
+ except (OSError, TypeError):
826
+ logger.info(
827
+ "Generation config file not found, using a generation config created from the model config."
828
+ )
829
+ pass
830
+
831
+ if device_map is not None:
832
+ dispatch_model(model, device_map=device_map)
833
+
834
+ return model
835
+ return super(BaichuanForCausalLM, cls).from_pretrained(pretrained_model_name_or_path, *model_args,
836
+ config=config, cache_dir=cache_dir, ignore_mismatched_sizes=ignore_mismatched_sizes,
837
+ force_download=force_download, local_files_only=local_files_only, token=token, revision=revision,
838
+ use_safetensors=use_safetensors, **kwargs)
839
+
840
+ def forward(
841
+ self,
842
+ input_ids: torch.LongTensor = None,
843
+ attention_mask: Optional[torch.Tensor] = None,
844
+ position_ids: Optional[torch.LongTensor] = None,
845
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
846
+ inputs_embeds: Optional[torch.FloatTensor] = None,
847
+ labels: Optional[torch.LongTensor] = None,
848
+ use_cache: Optional[bool] = None,
849
+ output_attentions: Optional[bool] = None,
850
+ output_hidden_states: Optional[bool] = None,
851
+ return_dict: Optional[bool] = None,
852
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
853
+
854
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
855
+ output_hidden_states = (
856
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
857
+ )
858
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
859
+
860
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
861
+ outputs = self.model(
862
+ input_ids=input_ids,
863
+ attention_mask=attention_mask,
864
+ position_ids=position_ids,
865
+ past_key_values=past_key_values,
866
+ inputs_embeds=inputs_embeds,
867
+ use_cache=use_cache,
868
+ output_attentions=output_attentions,
869
+ output_hidden_states=output_hidden_states,
870
+ return_dict=return_dict,
871
+ )
872
+
873
+ hidden_states = outputs[0]
874
+ logits = self.lm_head(hidden_states)
875
+ loss = None
876
+ if labels is not None:
877
+ # Shift so that tokens < n predict n
878
+ shift_logits = logits[..., :-1, :].contiguous()
879
+ shift_labels = labels[..., 1:].contiguous()
880
+ # Flatten the tokens
881
+ loss_fct = CrossEntropyLoss()
882
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
883
+ shift_labels = shift_labels.view(-1)
884
+ softmax_normalizer = shift_logits.max(-1).values ** 2
885
+ z_loss = self.config.z_loss_weight * softmax_normalizer.mean()
886
+ # Enable model parallelism
887
+ shift_labels = shift_labels.to(shift_logits.device)
888
+ loss = loss_fct(shift_logits, shift_labels) + z_loss
889
+
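+ # The auxiliary z-loss above (z_loss_weight * mean(max_logit**2)) penalizes large softmax
+ # normalizers, which helps keep the logits numerically stable during training.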
890
+ if not return_dict:
891
+ output = (logits,) + outputs[1:]
892
+ return (loss,) + output if loss is not None else output
893
+
894
+ return CausalLMOutputWithPast(
895
+ loss=loss,
896
+ logits=logits,
897
+ past_key_values=outputs.past_key_values,
898
+ hidden_states=outputs.hidden_states,
899
+ attentions=outputs.attentions,
900
+ )
901
+
902
+ def prepare_inputs_for_generation(
903
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
904
+ ):
905
+ if past_key_values:
906
+ input_ids = input_ids[:, -1:]
907
+
908
+ position_ids = kwargs.get("position_ids", None)
909
+ if attention_mask is not None and position_ids is None:
910
+ # create position_ids on the fly for batch generation
911
+ position_ids = attention_mask.long().cumsum(-1) - 1
912
+ position_ids.masked_fill_(attention_mask == 0, 1)
913
+ if past_key_values:
914
+ position_ids = position_ids[:, -1].unsqueeze(-1)
915
+
916
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
917
+ if inputs_embeds is not None and past_key_values is None:
918
+ model_inputs = {"inputs_embeds": inputs_embeds}
919
+ else:
920
+ model_inputs = {"input_ids": input_ids}
921
+
922
+ model_inputs.update(
923
+ {
924
+ "position_ids": position_ids,
925
+ "past_key_values": past_key_values,
926
+ "use_cache": kwargs.get("use_cache"),
927
+ "attention_mask": attention_mask,
928
+ }
929
+ )
930
+ return model_inputs
931
+
932
+ @staticmethod
933
+ def _reorder_cache(past_key_values, beam_idx):
934
+ reordered_past = ()
935
+ for layer_past in past_key_values:
936
+ reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
937
+ return reordered_past
938
+
939
+ def quantize(self, bits: int):
940
+ try:
941
+ from .quantizer import quantize_online
942
+ except ImportError:
943
+ raise ImportError(f"Needs QLinear to run quantize.")
944
+ return quantize_online(self, bits)
945
+
946
+ def chat(self, tokenizer, messages: List[dict], stream=False,
947
+ generation_config: Optional[GenerationConfig]=None):
948
+ generation_config = generation_config or self.generation_config
949
+ input_ids = build_chat_input(self, tokenizer, messages, generation_config.max_new_tokens)
950
+ if stream:
951
+ streamer = TextIterStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
952
+ Thread(target=self.generate, kwargs=dict(
953
+ inputs=input_ids, streamer=streamer,
954
+ generation_config=generation_config,
955
+ )).start()
956
+ return streamer
957
+ else:
958
+ outputs = self.generate(input_ids, generation_config=generation_config)
959
+ response = tokenizer.decode(outputs[0][len(input_ids[0]):], skip_special_tokens=True)
960
+ return response
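+
+ # Minimal usage sketch (illustrative, not part of the upstream repo): assumes the checkpoint
+ # directory contains this file, my_configuration_baichuan.py, the tokenizer, and the weights.
+ #
+ # from transformers import AutoTokenizer
+ # from my_configuration_baichuan import BaichuanConfig
+ #
+ # config = BaichuanConfig.from_pretrained("path/to/checkpoint")
+ # config.rope_scaling = {"type": "linear", "factor": 2.0}  # example scaling, adjust as needed
+ # tokenizer = AutoTokenizer.from_pretrained("path/to/checkpoint", trust_remote_code=True)
+ # model = BaichuanForCausalLM.from_pretrained("path/to/checkpoint", config=config).eval()
+ # inputs = tokenizer("Hello", return_tensors="pt")
+ # print(tokenizer.decode(model.generate(**inputs, max_new_tokens=32)[0]))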