diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..552fa94c09b139f61137af3c85850dfc82b59dad 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,56 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00052-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00030-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00041-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00045-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00003-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00008-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00032-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00046-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00048-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00012-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00049-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00050-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00013-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00027-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00006-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00022-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00023-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00036-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00010-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00011-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00016-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00040-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00014-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00029-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00037-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00044-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00005-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00028-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00035-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00009-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00021-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00039-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00047-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00017-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00019-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00020-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00004-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00034-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00002-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00033-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00038-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00043-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00053-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00024-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00031-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00025-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00026-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00051-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00001-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00042-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00015-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00007-of-00053.bin filter=lfs diff=lfs merge=lfs -text
+pytorch_model-00018-of-00053.bin filter=lfs diff=lfs merge=lfs -text
diff --git a/config.json b/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..82b194d760d0395c6db5e144d169e50a47a9a4a8
--- /dev/null
+++ b/config.json
@@ -0,0 +1,27 @@
+{
+ "architectures": [
+ "SkyworkForCausalLM"
+ ],
+ "auto_map": {
+ "AutoConfig": "configuration_skywork.SkyworkConfig",
+ "AutoModelForCausalLM": "modeling_skywork.SkyworkForCausalLM"
+ },
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "pad_token_id": 0,
+ "hidden_act": "silu",
+ "hidden_size": 4608,
+ "initializer_range": 0.01,
+ "intermediate_size": 12288,
+ "max_position_embeddings": 4096,
+ "model_type": "skywork",
+ "num_attention_heads": 36,
+ "num_hidden_layers": 52,
+ "num_key_value_heads": 36,
+ "rms_norm_eps": 1e-06,
+ "tie_word_embeddings": false,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.33.1",
+ "use_cache": true,
+ "vocab_size": 65519
+}
\ No newline at end of file
diff --git a/configuration_skywork.py b/configuration_skywork.py
new file mode 100644
index 0000000000000000000000000000000000000000..a61c89d2004fd4143b0357794d67202fc026a090
--- /dev/null
+++ b/configuration_skywork.py
@@ -0,0 +1,76 @@
+# Copyright (c) SkyworkAI and the HuggingFace Inc. team. All rights reserved.
+# This code is built upon Huggingface's transformers repository.
+
+from transformers.configuration_utils import PretrainedConfig
+from transformers.utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+Skywork_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
+
+
+class SkyworkConfig(PretrainedConfig):
+
+ model_type = "skywork"
+ keys_to_ignore_at_inference = ["past_key_values"]
+
+ def __init__(
+ self,
+ vocab_size=32000,
+ hidden_size=4096,
+ intermediate_size=11008,
+ num_hidden_layers=32,
+ num_attention_heads=32,
+ num_key_value_heads=None,
+ hidden_act="silu",
+ max_position_embeddings=2048,
+ initializer_range=0.02,
+ rms_norm_eps=1e-6,
+ use_cache=True,
+ pad_token_id=0,
+ bos_token_id=1,
+ eos_token_id=2,
+ pretraining_tp=1,
+ tie_word_embeddings=False,
+ rope_scaling=None,
+ rope_theta=10000.0,
+ attention_bias=False,
+ use_flash_attention=False,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ self.hidden_size = hidden_size
+ self.intermediate_size = intermediate_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+
+ # for backward compatibility
+ if num_key_value_heads is None:
+ num_key_value_heads = num_attention_heads
+
+ self.num_key_value_heads = num_key_value_heads
+ self.hidden_act = hidden_act
+ self.initializer_range = initializer_range
+ self.rms_norm_eps = rms_norm_eps
+ self.pretraining_tp = pretraining_tp
+ self.use_cache = use_cache
+ self.rope_scaling = rope_scaling
+ self.rope_theta = rope_theta
+ self.attention_bias = attention_bias
+ self.use_flash_attention = use_flash_attention
+ if self.use_flash_attention:
+ try:
+ from flash_attn.flash_attn_interface import flash_attn_varlen_func
+ from einops import rearrange
+            except ImportError:
+                raise ValueError("`use_flash_attention` requires Flash Attention 2+ and einops.\nTry `pip install einops` and installing Flash Attention from https://github.com/Dao-AILab/flash-attention")
+
+ super().__init__(
+ pad_token_id=pad_token_id,
+ bos_token_id=bos_token_id,
+ eos_token_id=eos_token_id,
+ tie_word_embeddings=tie_word_embeddings,
+ **kwargs,
+ )
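+
+# Editor's usage sketch (illustrative, not part of the upstream file; assumes a
+# local checkout of this module):
+#     from configuration_skywork import SkyworkConfig
+#     config = SkyworkConfig(hidden_size=4608, intermediate_size=12288,
+#                            num_hidden_layers=52, num_attention_heads=36,
+#                            vocab_size=65519)
+# reproduces the hyperparameters shipped in config.json above.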
diff --git a/generation_config.json b/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..7a52fbdba828d4f91fc5ba4be3b92a099a3e9cc3
--- /dev/null
+++ b/generation_config.json
@@ -0,0 +1,10 @@
+{
+ "bos_token_id": 1,
+ "do_sample": true,
+ "eos_token_id": 2,
+ "max_length": 4096,
+ "pad_token_id": 0,
+ "temperature": 0.6,
+ "top_p": 0.9,
+ "transformers_version": "4.34.0"
+}
\ No newline at end of file
diff --git a/modeling_skywork.py b/modeling_skywork.py
new file mode 100644
index 0000000000000000000000000000000000000000..f915f3a69753844c5045ed6cc0516dc4ee7131ae
--- /dev/null
+++ b/modeling_skywork.py
@@ -0,0 +1,1111 @@
+# Copyright (c) SkyworkAI and the HuggingFace Inc. team. All rights reserved.
+# This code is built upon Huggingface's transformers repository.
+import math
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.nn.functional as F
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from transformers.activations import ACT2FN
+from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
+from transformers.modeling_utils import PreTrainedModel
+from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
+from transformers.utils import (
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ is_flash_attn_available,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_skywork import SkyworkConfig
+
+
+if is_flash_attn_available():
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "SkyworkConfig"
+
+
+def _get_unpad_data(padding_mask):
+ seqlens_in_batch = padding_mask.sum(dim=-1, dtype=torch.int32)
+ indices = torch.nonzero(padding_mask.flatten(), as_tuple=False).flatten()
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
+    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
+ return (
+ indices,
+ cu_seqlens,
+ max_seqlen_in_batch,
+ )
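+
+# Editor's worked example (illustrative, not from upstream): for padding_mask
+# [[1, 1, 0], [1, 1, 1]], seqlens_in_batch == [2, 3], indices selects the 5
+# non-padding positions of the flattened mask, cu_seqlens == [0, 2, 5], and
+# max_seqlen_in_batch == 3 -- the layout expected by flash_attn_varlen_func.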
+
+
+# Copied from transformers.models.bart.modeling_bart._make_causal_mask
+def _make_causal_mask(
+ input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
+):
+ """
+    Make causal mask used for causal (unidirectional) self-attention.
+ """
+ bsz, tgt_len = input_ids_shape
+ mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
+ mask_cond = torch.arange(mask.size(-1), device=device)
+ mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
+ mask = mask.to(dtype)
+
+ if past_key_values_length > 0:
+ mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
+ return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
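+
+# Editor's worked example (illustrative, not from upstream): for tgt_len=3 and
+# no past, the mask is
+#     [[0, -inf, -inf],
+#      [0,    0, -inf],
+#      [0,    0,    0]]
+# (with -inf standing in for torch.finfo(dtype).min), so each position can
+# attend only to itself and to earlier positions.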
+
+
+# Copied from transformers.models.bart.modeling_bart._expand_mask
+def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
+ """
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
+ """
+ bsz, src_len = mask.size()
+ tgt_len = tgt_len if tgt_len is not None else src_len
+
+ expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
+
+ inverted_mask = 1.0 - expanded_mask
+
+ return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
+
+
+class SkyworkRMSNorm(nn.Module):
+ def __init__(self, hidden_size, eps=1e-6):
+ """
+ SkyworkRMSNorm is equivalent to T5LayerNorm
+ """
+ super().__init__()
+ self.weight = nn.Parameter(torch.ones(hidden_size))
+ self.variance_epsilon = eps
+
+ def forward(self, hidden_states):
+ input_dtype = hidden_states.dtype
+ hidden_states = hidden_states.to(torch.float32)
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+ return self.weight * hidden_states.to(input_dtype)
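+
+    # Editor's note (sketch, not from upstream): per hidden vector x this computes
+    #     y = weight * x / sqrt(mean_i(x_i^2) + eps)
+    # in float32 for numerical stability, then casts back to the input dtype.
+    # Unlike LayerNorm there is no mean subtraction and no bias term.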
+
+
+ALL_LAYERNORM_LAYERS.append(SkyworkRMSNorm)
+
+
+class SkyworkRotaryEmbedding(nn.Module):
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
+ super().__init__()
+
+ self.dim = dim
+ self.max_position_embeddings = max_position_embeddings
+ self.base = base
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
+
+ # Build here to make `torch.jit.trace` work.
+ self._set_cos_sin_cache(
+ seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
+ )
+
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
+ self.max_seq_len_cached = seq_len
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
+
+ freqs = torch.einsum("i,j->ij", t, self.inv_freq)
+        # Different from the paper, but it uses an equivalent permutation to obtain the same calculation
+ emb = torch.cat((freqs, freqs), dim=-1)
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
+
+ def forward(self, x, seq_len=None):
+ # x: [bs, num_attention_heads, seq_len, head_size]
+ if seq_len > self.max_seq_len_cached:
+ self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
+
+ return (
+ self.cos_cached[:seq_len].to(dtype=x.dtype),
+ self.sin_cached[:seq_len].to(dtype=x.dtype),
+ )
+
+
+class SkyworkLinearScalingRotaryEmbedding(SkyworkRotaryEmbedding):
+ """SkyworkRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
+
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
+ self.scaling_factor = scaling_factor
+ super().__init__(dim, max_position_embeddings, base, device)
+
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
+ self.max_seq_len_cached = seq_len
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
+ t = t / self.scaling_factor
+
+ freqs = torch.einsum("i,j->ij", t, self.inv_freq)
+        # Different from the paper, but it uses an equivalent permutation to obtain the same calculation
+ emb = torch.cat((freqs, freqs), dim=-1)
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
+
+
+class SkyworkDynamicNTKScalingRotaryEmbedding(SkyworkRotaryEmbedding):
+ """SkyworkRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla"""
+
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
+ self.scaling_factor = scaling_factor
+ super().__init__(dim, max_position_embeddings, base, device)
+
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
+ self.max_seq_len_cached = seq_len
+
+ if seq_len > self.max_position_embeddings:
+ base = self.base * (
+ (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
+ ) ** (self.dim / (self.dim - 2))
+ inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
+
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
+
+ freqs = torch.einsum("i,j->ij", t, self.inv_freq)
+        # Different from the paper, but it uses an equivalent permutation to obtain the same calculation
+ emb = torch.cat((freqs, freqs), dim=-1)
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
+
+
+def rotate_half(x):
+ """Rotates half the hidden dims of the input."""
+ x1 = x[..., : x.shape[-1] // 2]
+ x2 = x[..., x.shape[-1] // 2 :]
+ return torch.cat((-x2, x1), dim=-1)
+
+
+# Copied from transformers.models.gpt_neox.modeling_gpt_neox.apply_rotary_pos_emb
+def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
+ cos = cos[position_ids].unsqueeze(1) # [seq_len, dim] -> [batch_size, 1, seq_len, head_dim]
+ sin = sin[position_ids].unsqueeze(1)
+ q_embed = (q * cos) + (rotate_half(q) * sin)
+ k_embed = (k * cos) + (rotate_half(k) * sin)
+ return q_embed, k_embed
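+
+# Editor's note (sketch, not from upstream): with the half-split layout used
+# here, each feature pair (x_i, x_{i + d/2}) at angle theta is rotated to
+#     (x_i * cos(theta) - x_{i + d/2} * sin(theta),
+#      x_{i + d/2} * cos(theta) + x_i * sin(theta)),
+# the standard RoPE rotation, applied to both queries and keys.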
+
+
+class SkyworkMLP(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.hidden_size = config.hidden_size
+ self.intermediate_size = config.intermediate_size
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
+ self.act_fn = ACT2FN[config.hidden_act]
+
+ def forward(self, x):
+ if self.config.pretraining_tp > 1:
+ slice = self.intermediate_size // self.config.pretraining_tp
+ gate_proj_slices = self.gate_proj.weight.split(slice, dim=0)
+ up_proj_slices = self.up_proj.weight.split(slice, dim=0)
+ down_proj_slices = self.down_proj.weight.split(slice, dim=1)
+
+ gate_proj = torch.cat(
+ [F.linear(x, gate_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1
+ )
+ up_proj = torch.cat([F.linear(x, up_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1)
+
+ intermediate_states = (self.act_fn(gate_proj) * up_proj).split(slice, dim=2)
+ down_proj = [
+ F.linear(intermediate_states[i], down_proj_slices[i]) for i in range(self.config.pretraining_tp)
+ ]
+ down_proj = sum(down_proj)
+ else:
+ down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
+
+ return down_proj
+
+
+def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
+ """
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
+ """
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
+ if n_rep == 1:
+ return hidden_states
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
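+
+# Editor's example (illustrative, not from upstream): with num_attention_heads=32
+# and num_key_value_heads=8, n_rep == 4 and a (batch, 8, seqlen, head_dim) KV
+# tensor becomes (batch, 32, seqlen, head_dim). In this checkpoint both head
+# counts are 36, so n_rep == 1 and the tensor is returned unchanged.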
+
+
+class SkyworkAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(self, config: SkyworkConfig):
+ super().__init__()
+ self.config = config
+ self.hidden_size = config.hidden_size
+ self.num_heads = config.num_attention_heads
+ self.head_dim = self.hidden_size // self.num_heads
+ self.num_key_value_heads = config.num_key_value_heads
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
+ self.max_position_embeddings = config.max_position_embeddings
+ self.rope_theta = config.rope_theta
+
+ if (self.head_dim * self.num_heads) != self.hidden_size:
+ raise ValueError(
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
+ f" and `num_heads`: {self.num_heads})."
+ )
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.attention_bias)
+ self._init_rope()
+
+ def _init_rope(self):
+ if self.config.rope_scaling is None:
+ self.rotary_emb = SkyworkRotaryEmbedding(
+ self.head_dim,
+ max_position_embeddings=self.max_position_embeddings,
+ base=self.rope_theta,
+ )
+ else:
+ scaling_type = self.config.rope_scaling["type"]
+ scaling_factor = self.config.rope_scaling["factor"]
+ if scaling_type == "linear":
+ self.rotary_emb = SkyworkLinearScalingRotaryEmbedding(
+ self.head_dim,
+ max_position_embeddings=self.max_position_embeddings,
+ scaling_factor=scaling_factor,
+ base=self.rope_theta,
+ )
+ elif scaling_type == "dynamic":
+ self.rotary_emb = SkyworkDynamicNTKScalingRotaryEmbedding(
+ self.head_dim,
+ max_position_embeddings=self.max_position_embeddings,
+ scaling_factor=scaling_factor,
+ base=self.rope_theta,
+ )
+ else:
+ raise ValueError(f"Unknown RoPE scaling type {scaling_type}")
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ padding_mask: Optional[torch.LongTensor] = None,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ bsz, q_len, _ = hidden_states.size()
+
+ if self.config.pretraining_tp > 1:
+ key_value_slicing = (self.num_key_value_heads * self.head_dim) // self.config.pretraining_tp
+ query_slices = self.q_proj.weight.split(
+ (self.num_heads * self.head_dim) // self.config.pretraining_tp, dim=0
+ )
+ key_slices = self.k_proj.weight.split(key_value_slicing, dim=0)
+ value_slices = self.v_proj.weight.split(key_value_slicing, dim=0)
+
+ query_states = [F.linear(hidden_states, query_slices[i]) for i in range(self.config.pretraining_tp)]
+ query_states = torch.cat(query_states, dim=-1)
+
+ key_states = [F.linear(hidden_states, key_slices[i]) for i in range(self.config.pretraining_tp)]
+ key_states = torch.cat(key_states, dim=-1)
+
+ value_states = [F.linear(hidden_states, value_slices[i]) for i in range(self.config.pretraining_tp)]
+ value_states = torch.cat(value_states, dim=-1)
+
+ else:
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+ kv_seq_len = key_states.shape[-2]
+ if past_key_value is not None:
+ kv_seq_len += past_key_value[0].shape[-2]
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
+
+ if past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
+
+ past_key_value = (key_states, value_states) if use_cache else None
+
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
+
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
+
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
+ )
+ attn_weights = attn_weights + attention_mask
+
+ # upcast attention to fp32
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
+ attn_output = torch.matmul(attn_weights, value_states)
+
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.transpose(1, 2).contiguous()
+
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
+
+ if self.config.pretraining_tp > 1:
+ attn_output = attn_output.split(self.hidden_size // self.config.pretraining_tp, dim=2)
+ o_proj_slices = self.o_proj.weight.split(self.hidden_size // self.config.pretraining_tp, dim=1)
+ attn_output = sum([F.linear(attn_output[i], o_proj_slices[i]) for i in range(self.config.pretraining_tp)])
+ else:
+ attn_output = self.o_proj(attn_output)
+
+ if not output_attentions:
+ attn_weights = None
+
+ return attn_output, attn_weights, past_key_value
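+
+    # Editor's note (sketch, not from upstream): the eager path above computes
+    #     softmax(Q @ K^T / sqrt(head_dim) + mask) @ V
+    # with the softmax upcast to float32 before casting back to the query dtype.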
+
+
+class SkyworkFlashAttention2(SkyworkAttention):
+ """
+    Skywork flash attention module. This module inherits from `SkyworkAttention`, as the weights of the module stay
+    untouched. The only required change is in the forward pass, where it needs to correctly call the public API of
+    flash attention and deal with padding tokens in case the input contains any of them.
+ """
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ padding_mask: Optional[torch.LongTensor] = None,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ # SkyworkFlashAttention2 attention does not support output_attentions
+ output_attentions = False
+
+ bsz, q_len, _ = hidden_states.size()
+
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+        # Flash attention requires the input to have the shape
+        # batch_size x seq_length x head_dim x hidden_dim
+        # therefore we just need to keep the original shape
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+ kv_seq_len = key_states.shape[-2]
+ if past_key_value is not None:
+ kv_seq_len += past_key_value[0].shape[-2]
+
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
+
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
+
+ if past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
+
+ past_key_value = (key_states, value_states) if use_cache else None
+
+ query_states = query_states.transpose(1, 2)
+ key_states = key_states.transpose(1, 2)
+ value_states = value_states.transpose(1, 2)
+
+ # TODO: skywork does not have dropout in the config??
+ # It is recommended to use dropout with FA according to the docs
+ # when training.
+ dropout_rate = 0.0 # if not self.training else self.attn_dropout
+
+        # In PEFT, we usually cast the layer norms to float32 for training stability reasons,
+        # so the input hidden states get silently cast to float32. Hence, we need to
+        # cast them back to float16 just to be sure everything works as expected.
+        # This might slow down training & inference, so it is recommended not to cast the
+        # LayerNorms to fp32. (SkyworkRMSNorm handles it correctly)
+ input_dtype = query_states.dtype
+ if input_dtype == torch.float32:
+ logger.warning_once(
+ "The input hidden states seems to be silently casted in float32, this might be related to"
+ " the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
+ " float16."
+ )
+
+ query_states = query_states.to(torch.float16)
+ key_states = key_states.to(torch.float16)
+ value_states = value_states.to(torch.float16)
+
+ attn_output = self._flash_attention_forward(
+ query_states, key_states, value_states, padding_mask, q_len, dropout=dropout_rate
+ )
+
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
+ attn_output = self.o_proj(attn_output)
+
+ if not output_attentions:
+ attn_weights = None
+
+ return attn_output, attn_weights, past_key_value
+
+ def _flash_attention_forward(
+ self, query_states, key_states, value_states, padding_mask, query_length, dropout=0.0, softmax_scale=None
+ ):
+ """
+        Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
+        it first unpads the input, then computes the attention scores, and finally pads the attention scores back.
+
+ Args:
+ query_states (`torch.Tensor`):
+ Input query states to be passed to Flash Attention API
+ key_states (`torch.Tensor`):
+ Input key states to be passed to Flash Attention API
+ value_states (`torch.Tensor`):
+ Input value states to be passed to Flash Attention API
+ padding_mask (`torch.Tensor`):
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
+ position of padding tokens and 1 for the position of non-padding tokens.
+            dropout (`float`, *optional*):
+ Attention dropout
+ softmax_scale (`float`, *optional*):
+                The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim)
+ """
+ # Contains at least one padding token in the sequence
+ if padding_mask is not None:
+ batch_size = query_states.shape[0]
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
+ query_states, key_states, value_states, padding_mask, query_length
+ )
+
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
+
+ attn_output_unpad = flash_attn_varlen_func(
+ query_states,
+ key_states,
+ value_states,
+ cu_seqlens_q=cu_seqlens_q,
+ cu_seqlens_k=cu_seqlens_k,
+ max_seqlen_q=max_seqlen_in_batch_q,
+ max_seqlen_k=max_seqlen_in_batch_k,
+ dropout_p=dropout,
+ softmax_scale=softmax_scale,
+ causal=True,
+ )
+
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
+ else:
+ attn_output = flash_attn_func(
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=True
+ )
+
+ return attn_output
+
+ def _upad_input(self, query_layer, key_layer, value_layer, padding_mask, query_length):
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(padding_mask)
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
+
+ key_layer = index_first_axis(
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
+ )
+ value_layer = index_first_axis(
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
+ )
+ if query_length == kv_seq_len:
+ query_layer = index_first_axis(
+ query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
+ )
+ cu_seqlens_q = cu_seqlens_k
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
+ indices_q = indices_k
+ elif query_length == 1:
+ max_seqlen_in_batch_q = 1
+ cu_seqlens_q = torch.arange(
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
+ ) # There is a memcpy here, that is very bad.
+ indices_q = cu_seqlens_q[:-1]
+ query_layer = query_layer.squeeze(1)
+ else:
+ # The -q_len: slice assumes left padding.
+ padding_mask = padding_mask[:, -query_length:]
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, padding_mask)
+
+ return (
+ query_layer,
+ key_layer,
+ value_layer,
+ indices_q,
+ (cu_seqlens_q, cu_seqlens_k),
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
+ )
+
+
+class SkyworkDecoderLayer(nn.Module):
+ def __init__(self, config: SkyworkConfig):
+ super().__init__()
+ self.hidden_size = config.hidden_size
+ self.self_attn = (
+ SkyworkAttention(config=config)
+ if not getattr(config, "_flash_attn_2_enabled", False)
+ else SkyworkFlashAttention2(config=config)
+ )
+ self.mlp = SkyworkMLP(config)
+ self.input_layernorm = SkyworkRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+ self.post_attention_layernorm = SkyworkRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = False,
+ padding_mask: Optional[torch.LongTensor] = None,
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+ (see `past_key_values`).
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
+ """
+
+ residual = hidden_states
+
+ hidden_states = self.input_layernorm(hidden_states)
+
+ # Self Attention
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ padding_mask=padding_mask,
+ )
+ hidden_states = residual + hidden_states
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.post_attention_layernorm(hidden_states)
+ hidden_states = self.mlp(hidden_states)
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights,)
+
+ if use_cache:
+ outputs += (present_key_value,)
+
+ return outputs
+
+class SkyworkPreTrainedModel(PreTrainedModel):
+ config_class = SkyworkConfig
+ base_model_prefix = "model"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["SkyworkDecoderLayer"]
+ _skip_keys_device_placement = "past_key_values"
+ _supports_flash_attn_2 = True
+
+ def _init_weights(self, module):
+ std = self.config.initializer_range
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+ def _set_gradient_checkpointing(self, module, value=False):
+ if isinstance(module, SkyworkModel):
+ module.gradient_checkpointing = value
+
+class SkyworkModel(SkyworkPreTrainedModel):
+ """
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`SkyworkDecoderLayer`]
+
+ Args:
+ config: SkyworkConfig
+ """
+
+ def __init__(self, config: SkyworkConfig):
+ super().__init__(config)
+ self.padding_idx = config.pad_token_id
+ self.vocab_size = config.vocab_size
+
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
+ self.layers = nn.ModuleList([SkyworkDecoderLayer(config) for _ in range(config.num_hidden_layers)])
+ self.norm = SkyworkRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.embed_tokens = value
+
+ # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
+ def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
+ # create causal mask
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ combined_attention_mask = None
+ if input_shape[-1] > 1:
+ combined_attention_mask = _make_causal_mask(
+ input_shape,
+ inputs_embeds.dtype,
+ device=inputs_embeds.device,
+ past_key_values_length=past_key_values_length,
+ )
+
+ if attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
+ inputs_embeds.device
+ )
+ combined_attention_mask = (
+ expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
+ )
+
+ return combined_attention_mask
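+
+    # Editor's note (sketch, not from upstream): the combined mask is additive --
+    # allowed positions hold 0 and disallowed positions hold the dtype minimum --
+    # so it can simply be added to the attention scores before the softmax.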
+
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # retrieve input_ids and inputs_embeds
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ batch_size, seq_length = input_ids.shape
+ elif inputs_embeds is not None:
+ batch_size, seq_length, _ = inputs_embeds.shape
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ seq_length_with_past = seq_length
+ past_key_values_length = 0
+
+ if past_key_values is not None:
+ past_key_values_length = past_key_values[0][0].shape[2]
+ seq_length_with_past = seq_length_with_past + past_key_values_length
+
+ if position_ids is None:
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+ position_ids = torch.arange(
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
+ )
+ position_ids = position_ids.unsqueeze(0)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids)
+ # embed positions
+ if attention_mask is None:
+ attention_mask = torch.ones(
+ (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
+ )
+ padding_mask = None
+ else:
+ if 0 in attention_mask:
+ padding_mask = attention_mask
+ else:
+ padding_mask = None
+
+ attention_mask = self._prepare_decoder_attention_mask(
+ attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
+ )
+
+ hidden_states = inputs_embeds
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ next_decoder_cache = () if use_cache else None
+
+ for idx, decoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+
+ def create_custom_forward(module):
+ def custom_forward(*inputs):
+                        # pass the captured past_key_value and flags through the checkpoint boundary
+                        return module(*inputs, past_key_value, output_attentions, padding_mask=padding_mask)
+
+ return custom_forward
+
+ layer_outputs = torch.utils.checkpoint.checkpoint(
+ create_custom_forward(decoder_layer), hidden_states, attention_mask, position_ids
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ padding_mask=padding_mask,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if use_cache:
+ next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ hidden_states = self.norm(hidden_states)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ next_cache = next_decoder_cache if use_cache else None
+ if not return_dict:
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
+ return BaseModelOutputWithPast(
+ last_hidden_state=hidden_states,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ )
+
+
+class SkyworkForCausalLM(SkyworkPreTrainedModel):
+ _tied_weights_keys = ["lm_head.weight"]
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.model = SkyworkModel(config)
+ self.vocab_size = config.vocab_size
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.embed_tokens = value
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def set_decoder(self, decoder):
+ self.model = decoder
+
+ def get_decoder(self):
+ return self.model
+
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
+ r"""
+ Args:
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+                Labels for computing the language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, SkyworkForCausalLM
+
+ >>> model = SkyworkForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
+ >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
+
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+ >>> # Generate
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
+ ```"""
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
+ outputs = self.model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+ if self.config.pretraining_tp > 1:
+ lm_head_slices = self.lm_head.weight.split(self.vocab_size // self.config.pretraining_tp, dim=0)
+ logits = [F.linear(hidden_states, lm_head_slices[i]) for i in range(self.config.pretraining_tp)]
+ logits = torch.cat(logits, dim=-1)
+ else:
+ logits = self.lm_head(hidden_states)
+ logits = logits.float()
+
+ loss = None
+ if labels is not None:
+ # Shift so that tokens < n predict n
+ shift_logits = logits[..., :-1, :].contiguous()
+ shift_labels = labels[..., 1:].contiguous()
+ # Flatten the tokens
+ loss_fct = CrossEntropyLoss()
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
+ shift_labels = shift_labels.view(-1)
+ # Enable model parallelism
+ shift_labels = shift_labels.to(shift_logits.device)
+ loss = loss_fct(shift_logits, shift_labels)
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return (loss,) + output if loss is not None else output
+
+ return CausalLMOutputWithPast(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def prepare_inputs_for_generation(
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
+ ):
+ if past_key_values:
+ input_ids = input_ids[:, -1:]
+
+ position_ids = kwargs.get("position_ids", None)
+ if attention_mask is not None and position_ids is None:
+ # create position_ids on the fly for batch generation
+ position_ids = attention_mask.long().cumsum(-1) - 1
+ position_ids.masked_fill_(attention_mask == 0, 1)
+ if past_key_values:
+ position_ids = position_ids[:, -1].unsqueeze(-1)
+
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+ if inputs_embeds is not None and past_key_values is None:
+ model_inputs = {"inputs_embeds": inputs_embeds}
+ else:
+ model_inputs = {"input_ids": input_ids}
+
+ model_inputs.update(
+ {
+ "position_ids": position_ids,
+ "past_key_values": past_key_values,
+ "use_cache": kwargs.get("use_cache"),
+ "attention_mask": attention_mask,
+ }
+ )
+ return model_inputs
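+
+    # Editor's worked example (illustrative, not from upstream): for
+    # attention_mask = [0, 1, 1, 1], cumsum(-1) - 1 gives [-1, 0, 1, 2];
+    # masked_fill on the padded position yields position_ids = [1, 0, 1, 2],
+    # so left-padding does not shift the rotary positions of real tokens.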
+
+ @staticmethod
+ def _reorder_cache(past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+ )
+ return reordered_past
+
+class SkyworkForSequenceClassification(SkyworkPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.model = SkyworkModel(config)
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.embed_tokens = value
+
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.model(
+ input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = transformer_outputs[0]
+ logits = self.score(hidden_states)
+
+ if input_ids is not None:
+ batch_size = input_ids.shape[0]
+ else:
+ batch_size = inputs_embeds.shape[0]
+
+ if self.config.pad_token_id is None and batch_size != 1:
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
+ if self.config.pad_token_id is None:
+ sequence_lengths = -1
+ else:
+ if input_ids is not None:
+ sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).long().argmax(-1) - 1).to(
+ logits.device
+ )
+ else:
+ sequence_lengths = -1
+
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
+
+ loss = None
+ if labels is not None:
+ labels = labels.to(logits.device)
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(pooled_logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(pooled_logits, labels)
+ if not return_dict:
+ output = (pooled_logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutputWithPast(
+ loss=loss,
+ logits=pooled_logits,
+ past_key_values=transformer_outputs.past_key_values,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
\ No newline at end of file
diff --git a/pytorch_model-00001-of-00053.bin b/pytorch_model-00001-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..2ac39e729e1c8a84dae673ab77f570335a178c22
--- /dev/null
+++ b/pytorch_model-00001-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:68940720301857dba24c7f68a2560e165076a577622da9c3ecef68f7963f5cff
+size 509629447
diff --git a/pytorch_model-00002-of-00053.bin b/pytorch_model-00002-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..5dda57502d5a4d393d8d6d706c6deda132307503
--- /dev/null
+++ b/pytorch_model-00002-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e205c6182bb23ed7e38bc67a371a7fd73550720f04812a6d2d40f935dd0e23a1
+size 509629447
diff --git a/pytorch_model-00003-of-00053.bin b/pytorch_model-00003-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..94dc42c7531630048d966d6a25f014ba906b2fa6
--- /dev/null
+++ b/pytorch_model-00003-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d28f1778e9d1c25129ce2c1efa57bb9a4f0481f20ea599e838ead0b0503fe51b
+size 509629447
diff --git a/pytorch_model-00004-of-00053.bin b/pytorch_model-00004-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..6a8955d29ecd5c06559c8ac8fc8661b77cdbbaa5
--- /dev/null
+++ b/pytorch_model-00004-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:492050395a7cb564ef08be3ae8d1c41b045a0946e5b3963f9eccf2d7a83c756e
+size 509629447
diff --git a/pytorch_model-00005-of-00053.bin b/pytorch_model-00005-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..334ed28a94e4437436a529fe2eb0fcab2be6e315
--- /dev/null
+++ b/pytorch_model-00005-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9754fc15b494d09fb7e69061945e99f3581c04b92155615eaac613f795f90af2
+size 509629447
diff --git a/pytorch_model-00006-of-00053.bin b/pytorch_model-00006-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..67d1d9081e427a0fbfb5d66715bbd2835cb581ce
--- /dev/null
+++ b/pytorch_model-00006-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:289188fd00cb91892ff5892c318a1c95ca7f116b26f61f1333da002591b8aa4e
+size 509629447
diff --git a/pytorch_model-00007-of-00053.bin b/pytorch_model-00007-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..11229b4cae0fe47291424f666f29a5c0cbe1d5c1
--- /dev/null
+++ b/pytorch_model-00007-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6df94cae029ebf292724c215cc98278a6f5094dd389ee737010aae267b4615f0
+size 509629447
diff --git a/pytorch_model-00008-of-00053.bin b/pytorch_model-00008-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..9aa7793b63e3ae2cc8a1f7ab9db83f34242d4acf
--- /dev/null
+++ b/pytorch_model-00008-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f1fbf1ecdea5d2e884a9f5b4f490cd25190ca6e136e21b6a5742502a7314ce1
+size 509629447
diff --git a/pytorch_model-00009-of-00053.bin b/pytorch_model-00009-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..da841b7797f1fb998639052f921415f1c58869bb
--- /dev/null
+++ b/pytorch_model-00009-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:662acb1bbf08baa2339cbb5a1b6751d9f666a5b4bdc5b3e2bae0dd3e9a978816
+size 509629447
diff --git a/pytorch_model-00010-of-00053.bin b/pytorch_model-00010-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..29d7dd648e19dc2a8bee5fff6ac62c9a01bd91a7
--- /dev/null
+++ b/pytorch_model-00010-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:38147ea6887d0daeb9ffb680e50fdda895aaa00fbd7abb3831da45443f335bce
+size 509629447
diff --git a/pytorch_model-00011-of-00053.bin b/pytorch_model-00011-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..a8da5f1746928d06b3f12ab1d1e677ab95d282b2
--- /dev/null
+++ b/pytorch_model-00011-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:302cf0a909d5da00e667f58c982d59d3fc69afb8e51082e568f015da170cdf17
+size 509629511
diff --git a/pytorch_model-00012-of-00053.bin b/pytorch_model-00012-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..5cfbcd098b24b0924500659f46b8042343cd0ed5
--- /dev/null
+++ b/pytorch_model-00012-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ceff426fbfaf9a6911d36fdaf5d7bc098506fe0bae5cd006f1a7a76f738a0f09
+size 509629511
diff --git a/pytorch_model-00013-of-00053.bin b/pytorch_model-00013-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..8dd8b998a09ece1ffe8039d66cc1fa6c252bf143
--- /dev/null
+++ b/pytorch_model-00013-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:63fa9883b515fafbd78d935e9570710b1bdd244c81a4743337b66dd402a2e288
+size 509629511
diff --git a/pytorch_model-00014-of-00053.bin b/pytorch_model-00014-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..a85c6678230d58cd3a4ef63ee6a2de4e68cf2419
--- /dev/null
+++ b/pytorch_model-00014-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:29a39a5d252dc31a4b60a20cc4d3651931062e0bdd97391005c761877e00f84e
+size 509629511
diff --git a/pytorch_model-00015-of-00053.bin b/pytorch_model-00015-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..c29f3080800edf683f9c25eed8b71639ca7d87a7
--- /dev/null
+++ b/pytorch_model-00015-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fbbdd55cb635c442b06906a76a36500ebfa8ec5a6fd8d33a99123f2761aa0598
+size 509629511
diff --git a/pytorch_model-00016-of-00053.bin b/pytorch_model-00016-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..38b381c3c205f01228b1c55fb9dd24bcdd5080f3
--- /dev/null
+++ b/pytorch_model-00016-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4afa21eaf0a5ca8341d7dcec5b76dba960f797de9ac2fca96b6076a0cb137826
+size 509629511
diff --git a/pytorch_model-00017-of-00053.bin b/pytorch_model-00017-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..8cecf2c5ebc78120ebd3bb8f38dbf06d52ba7a36
--- /dev/null
+++ b/pytorch_model-00017-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9862fb9399bf64c00f5d7956d7c47e390a8fe58a2a9fe20b1dec66ffaa77e486
+size 509629511
diff --git a/pytorch_model-00018-of-00053.bin b/pytorch_model-00018-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..457184605fc5d7685e854379e4e77b2b285e35df
--- /dev/null
+++ b/pytorch_model-00018-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b1e97e33b7d6de423a52c2cbfcfb6ab1ec64a47b73c5680fe1acb6664b613af7
+size 509629511
diff --git a/pytorch_model-00019-of-00053.bin b/pytorch_model-00019-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..9f1c95485682c7f8cbedb5235de5f87034219cfd
--- /dev/null
+++ b/pytorch_model-00019-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0bc3079f15219c1f453a21aaa410a14cdd8aa3416e0c37ec8c5ddb1c708eccbb
+size 509629511
diff --git a/pytorch_model-00020-of-00053.bin b/pytorch_model-00020-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..b442f6b428ffddd08fb4c57565bd48540a9f1233
--- /dev/null
+++ b/pytorch_model-00020-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b3b0b92ea03f7e8e457b3362b6b72aa814294b562a173613c3763103973be8f2
+size 509629511
diff --git a/pytorch_model-00021-of-00053.bin b/pytorch_model-00021-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..ddeea49bab60f48d31e6008d319d84d910cddfdd
--- /dev/null
+++ b/pytorch_model-00021-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:410de3915493f0c984db3636d195b79e29c36a8a04070a6f84499e7ba30228b9
+size 509629511
diff --git a/pytorch_model-00022-of-00053.bin b/pytorch_model-00022-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..04a77ed7a67339341f03fc026bd9f4417dff8e99
--- /dev/null
+++ b/pytorch_model-00022-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:faf930f0ee2069fd60baff35cb8015274abe05f83878407fc405287ebbf1ff0d
+size 509629511
diff --git a/pytorch_model-00023-of-00053.bin b/pytorch_model-00023-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..f99621bbe83cd5204857ee9f488ce26f8533ac90
--- /dev/null
+++ b/pytorch_model-00023-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23feb32029849bbacc1ad52089ca50cdc0ccbd1847db5bcd740c6070c89ed8fe
+size 509629511
diff --git a/pytorch_model-00024-of-00053.bin b/pytorch_model-00024-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..a75c2c827bbf46ccad1a035b30085eba26d4187a
--- /dev/null
+++ b/pytorch_model-00024-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:57a66e21dbdbd69f0bdc0531c334d2e1d718d8a5f2f1c81fe4c7d8e328978f7c
+size 509629511
diff --git a/pytorch_model-00025-of-00053.bin b/pytorch_model-00025-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..7a0e4489c556f2b8c751412d4e5e8936d7bd99a6
--- /dev/null
+++ b/pytorch_model-00025-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e225e949dc5185b44f1c210cf027306991f1299080f06289013644dd426c905c
+size 509629511
diff --git a/pytorch_model-00026-of-00053.bin b/pytorch_model-00026-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..277f23149e21c72bdc999a5fea846851594b73c2
--- /dev/null
+++ b/pytorch_model-00026-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1f21ed93e2c3b7d5ce4f35b018dd9662e2ff9aca328939715c8e538f9dae6a51
+size 509629511
diff --git a/pytorch_model-00027-of-00053.bin b/pytorch_model-00027-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..8b0955ea08febabe3a8cb9bbbccc8b2b16929103
--- /dev/null
+++ b/pytorch_model-00027-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:959c22c54dc5e35033e061c95a2cf6579d72c64263ef969337df463ed240023e
+size 509629511
diff --git a/pytorch_model-00028-of-00053.bin b/pytorch_model-00028-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..40eadc0c8b04c079e842cb8d1705dfb6609b741c
--- /dev/null
+++ b/pytorch_model-00028-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eb537adaac04127fb050014845363a3b929b9d0d1b9a15c9f853ebe8479c0b00
+size 509629511
diff --git a/pytorch_model-00029-of-00053.bin b/pytorch_model-00029-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..f962374997940fb817e1ff276971d7bfd9fb0567
--- /dev/null
+++ b/pytorch_model-00029-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:796cd3b42ce08e128f8f84bf5e9f9ea174f63a13711349cbfe8b9d4eb08d4814
+size 509629511
diff --git a/pytorch_model-00030-of-00053.bin b/pytorch_model-00030-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..a52b047514ec057e363147a8cbfcd3d058d3936f
--- /dev/null
+++ b/pytorch_model-00030-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1a2713d082266c6f286d093399910ef7542bf25b5d41680ecf2e92918b2ffde3
+size 509629511
diff --git a/pytorch_model-00031-of-00053.bin b/pytorch_model-00031-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..fb4e057e70f592ffe4e19020ed01c48141358bc0
--- /dev/null
+++ b/pytorch_model-00031-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:85fae9975fa5af1d748200bae1a47e09a8ebd942c8b37a312044cbd0d8654345
+size 509629511
diff --git a/pytorch_model-00032-of-00053.bin b/pytorch_model-00032-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..958c22fea5791ed5a516a7072870200c800c4190
--- /dev/null
+++ b/pytorch_model-00032-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6eb5b97e5cf86189304dfa7d163a266827ed4a50b8ea9bc14c7ff8cc117800e3
+size 509629511
diff --git a/pytorch_model-00033-of-00053.bin b/pytorch_model-00033-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..486b9b041f73bee314575f6703e0e4af2b100b1f
--- /dev/null
+++ b/pytorch_model-00033-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0a91cc0c084b84e4a7d18946f153ce4ae79723df5e7537b35612f6a2d0d78350
+size 509629511
diff --git a/pytorch_model-00034-of-00053.bin b/pytorch_model-00034-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..137d7de2602863a8b5ca04e9aea404eaf8578347
--- /dev/null
+++ b/pytorch_model-00034-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e06e3c2c0c61e96d282c46e858c681080c03fa8f501d7a5e5961c2e797560fb0
+size 509629511
diff --git a/pytorch_model-00035-of-00053.bin b/pytorch_model-00035-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..3c38f9cee7ad8b5ac2d7cec4d5c3385e21e4c1ae
--- /dev/null
+++ b/pytorch_model-00035-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:01617a64df649449201a8284bab3044fa8e61ce8ce4bb63d975bf5f580f79bff
+size 509629511
diff --git a/pytorch_model-00036-of-00053.bin b/pytorch_model-00036-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..36aa61cac4a583d4bafaa6e1869e3469571dedde
--- /dev/null
+++ b/pytorch_model-00036-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:45b2ca00bdb605af8bd785ba122268cbd491d8ec7bdaa597774edb489fcd0298
+size 509629511
diff --git a/pytorch_model-00037-of-00053.bin b/pytorch_model-00037-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..b81daa273663644eca1a727fcecf9f65ea21caf9
--- /dev/null
+++ b/pytorch_model-00037-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:139dbbcbb259f4dda361d015b87ea7f12ec19b6b8e0897fedd0e2a3614c3b37e
+size 509629511
diff --git a/pytorch_model-00038-of-00053.bin b/pytorch_model-00038-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..2b1dc6f71b9e4afad0f1e0d781bb70e0146f359d
--- /dev/null
+++ b/pytorch_model-00038-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10ac7a95b8bde058ea0d5c7a8a55af1d4aed65d9ae12e286fec88b3a9256f636
+size 509629511
diff --git a/pytorch_model-00039-of-00053.bin b/pytorch_model-00039-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..4d8235f4c4b8ada8bdb9d63aa33fa9144e723eb7
--- /dev/null
+++ b/pytorch_model-00039-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9dece182f9bfb4f56d689437396ce2a65318d08aff9de5ede050eb4240e70a09
+size 509629511
diff --git a/pytorch_model-00040-of-00053.bin b/pytorch_model-00040-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..75cac0be8485ea057340e737450beae2ef9f38be
--- /dev/null
+++ b/pytorch_model-00040-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6cbd32d4ec97b7c2438be4fb3863251c09cf40a8ab6d5918935a5c7f4c620f20
+size 509629511
diff --git a/pytorch_model-00041-of-00053.bin b/pytorch_model-00041-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..13b9bf33a0b00d3122a8a9dcdbdc301675f6c772
--- /dev/null
+++ b/pytorch_model-00041-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8681905b13bbfe3384af79f9da47d7a05c79f8cb5e0c36f5e0626b20f9655e08
+size 509629511
diff --git a/pytorch_model-00042-of-00053.bin b/pytorch_model-00042-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..4bd492f1f4276c6b7a30e1b3d2ff35fc24cefd42
--- /dev/null
+++ b/pytorch_model-00042-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e3c7df5786804b1f93dc1488847e0aecd23aa2b4c133ccd2476a26ad8a25dde
+size 509629511
diff --git a/pytorch_model-00043-of-00053.bin b/pytorch_model-00043-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..0e0a2a1b2495af42d6a63dac6551f0f6c7f00cfe
--- /dev/null
+++ b/pytorch_model-00043-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6c9ecdfb82b40f8b0185eb3f63271cd351d08a028d250e327f5c287ec726cf3c
+size 509629511
diff --git a/pytorch_model-00044-of-00053.bin b/pytorch_model-00044-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..aec40c0e4f970ac06e7ee9a7bd5da5c21224b4c4
--- /dev/null
+++ b/pytorch_model-00044-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:38c0296e8c1aff535c03b641d527142c54af30339261a11555ff949caf84f338
+size 509629511
diff --git a/pytorch_model-00045-of-00053.bin b/pytorch_model-00045-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..cd9613e6face18cf3065bd80407136068e08df9e
--- /dev/null
+++ b/pytorch_model-00045-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a628a44063a617d465cc8bc44deea8bbc0808042f8232b0f5ffb0b3a73feddea
+size 509629511
diff --git a/pytorch_model-00046-of-00053.bin b/pytorch_model-00046-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..2f10d87b20891c7f9fa4d214d19a3f1a63353745
--- /dev/null
+++ b/pytorch_model-00046-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c096d5b8f2aafaa5b117124061979353b84e590b80bd4c5eb683f43cedc7e4a6
+size 509629511
diff --git a/pytorch_model-00047-of-00053.bin b/pytorch_model-00047-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..242679c47e333eb4934449141880478195843f07
--- /dev/null
+++ b/pytorch_model-00047-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bce5d2a5d123520d4a539ecaef35c85f6b3ef1fdeb71c08cec272df5d6a94758
+size 509629511
diff --git a/pytorch_model-00048-of-00053.bin b/pytorch_model-00048-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..01f6321f5921e254e97ae887a1a6aadc4d985d66
--- /dev/null
+++ b/pytorch_model-00048-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:760fc07ac215be0a090b34a34635103e8a2394fb3555dc8bd91c9564c2b9e6d1
+size 509629511
diff --git a/pytorch_model-00049-of-00053.bin b/pytorch_model-00049-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..4410f30720a1a99307eff688a551842edbbc02d1
--- /dev/null
+++ b/pytorch_model-00049-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f0c4d923c6ea9ea5dadae1b76058eaee454ba9119d55b1115eef90d8b6146482
+size 509629511
diff --git a/pytorch_model-00050-of-00053.bin b/pytorch_model-00050-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..6b7a24e2475872f99dd309968daab6687f397b73
--- /dev/null
+++ b/pytorch_model-00050-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e5d99e5756f5c2e1330e41d5bab211b2c54e1d040f0e1c07efe9c804d2e60e15
+size 509629511
diff --git a/pytorch_model-00051-of-00053.bin b/pytorch_model-00051-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..e47dae98246b481e91d0b83cbc2580d867a372de
--- /dev/null
+++ b/pytorch_model-00051-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b320759b4be5903937791af67d46f90a39514a8f89a78278402e974aa7c50ae
+size 509629511
diff --git a/pytorch_model-00052-of-00053.bin b/pytorch_model-00052-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..1c47d576da0ed311e97bada57b1221b654b58a71
--- /dev/null
+++ b/pytorch_model-00052-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:90b5ad5bd86b73cf455eb6d55659136d45d94e87df5dd7aff22b338cbc91d440
+size 509629511
diff --git a/pytorch_model-00053-of-00053.bin b/pytorch_model-00053-of-00053.bin
new file mode 100644
index 0000000000000000000000000000000000000000..183973cde33ac82acebef718aec1330dea02fdbb
--- /dev/null
+++ b/pytorch_model-00053-of-00053.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ed65f60067e0738d1e50f4b409bbdb4b5d810e9716e8940da5cdb2d2ba0af4f7
+size 1207656611
diff --git a/pytorch_model.bin.index.json b/pytorch_model.bin.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..163c37a78b34efe7cc858ea3fdca93e4c7c25699
--- /dev/null
+++ b/pytorch_model.bin.index.json
@@ -0,0 +1 @@
+{"metadata": {"total_size": 27708239872}, "weight_map": {"model.layers.0.input_layernorm.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.post_attention_layernorm.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.self_attn.q_proj.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.self_attn.k_proj.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.self_attn.v_proj.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.self_attn.o_proj.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00053.bin", "model.layers.0.mlp.gate_proj.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.mlp.up_proj.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.mlp.down_proj.weight": "pytorch_model-00001-of-00053.bin", "model.layers.1.input_layernorm.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.post_attention_layernorm.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.self_attn.q_proj.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.self_attn.k_proj.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.self_attn.v_proj.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.self_attn.o_proj.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00053.bin", "model.layers.1.mlp.gate_proj.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.mlp.up_proj.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.mlp.down_proj.weight": "pytorch_model-00002-of-00053.bin", "model.layers.2.input_layernorm.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.post_attention_layernorm.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.self_attn.q_proj.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.self_attn.k_proj.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.self_attn.v_proj.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.self_attn.o_proj.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00053.bin", "model.layers.2.mlp.gate_proj.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.mlp.up_proj.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.mlp.down_proj.weight": "pytorch_model-00003-of-00053.bin", "model.layers.3.input_layernorm.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.post_attention_layernorm.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.self_attn.q_proj.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.self_attn.k_proj.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.self_attn.v_proj.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.self_attn.o_proj.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.self_attn.rotary_emb.inv_freq": "pytorch_model-00004-of-00053.bin", "model.layers.3.mlp.gate_proj.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.mlp.up_proj.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.mlp.down_proj.weight": "pytorch_model-00004-of-00053.bin", "model.layers.4.input_layernorm.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.post_attention_layernorm.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.self_attn.q_proj.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.self_attn.k_proj.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.self_attn.v_proj.weight": 
"pytorch_model-00005-of-00053.bin", "model.layers.4.self_attn.o_proj.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.self_attn.rotary_emb.inv_freq": "pytorch_model-00005-of-00053.bin", "model.layers.4.mlp.gate_proj.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.mlp.up_proj.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.mlp.down_proj.weight": "pytorch_model-00005-of-00053.bin", "model.layers.5.input_layernorm.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.post_attention_layernorm.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.self_attn.q_proj.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.self_attn.k_proj.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.self_attn.v_proj.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.self_attn.o_proj.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.self_attn.rotary_emb.inv_freq": "pytorch_model-00006-of-00053.bin", "model.layers.5.mlp.gate_proj.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.mlp.up_proj.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.mlp.down_proj.weight": "pytorch_model-00006-of-00053.bin", "model.layers.6.input_layernorm.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.post_attention_layernorm.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.self_attn.q_proj.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.self_attn.k_proj.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.self_attn.v_proj.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.self_attn.o_proj.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.self_attn.rotary_emb.inv_freq": "pytorch_model-00007-of-00053.bin", "model.layers.6.mlp.gate_proj.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.mlp.up_proj.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.mlp.down_proj.weight": "pytorch_model-00007-of-00053.bin", "model.layers.7.input_layernorm.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.post_attention_layernorm.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.self_attn.q_proj.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.self_attn.k_proj.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.self_attn.v_proj.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.self_attn.o_proj.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.self_attn.rotary_emb.inv_freq": "pytorch_model-00008-of-00053.bin", "model.layers.7.mlp.gate_proj.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.mlp.up_proj.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.mlp.down_proj.weight": "pytorch_model-00008-of-00053.bin", "model.layers.8.input_layernorm.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.post_attention_layernorm.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.self_attn.q_proj.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.self_attn.k_proj.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.self_attn.v_proj.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.self_attn.o_proj.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.self_attn.rotary_emb.inv_freq": "pytorch_model-00009-of-00053.bin", "model.layers.8.mlp.gate_proj.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.mlp.up_proj.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.mlp.down_proj.weight": "pytorch_model-00009-of-00053.bin", 
"model.layers.9.input_layernorm.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.post_attention_layernorm.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.self_attn.q_proj.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.self_attn.k_proj.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.self_attn.v_proj.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.self_attn.o_proj.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.self_attn.rotary_emb.inv_freq": "pytorch_model-00010-of-00053.bin", "model.layers.9.mlp.gate_proj.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.mlp.up_proj.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.mlp.down_proj.weight": "pytorch_model-00010-of-00053.bin", "model.layers.10.input_layernorm.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.post_attention_layernorm.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.self_attn.q_proj.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.self_attn.k_proj.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.self_attn.v_proj.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.self_attn.o_proj.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.self_attn.rotary_emb.inv_freq": "pytorch_model-00011-of-00053.bin", "model.layers.10.mlp.gate_proj.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.mlp.up_proj.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.mlp.down_proj.weight": "pytorch_model-00011-of-00053.bin", "model.layers.11.input_layernorm.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.post_attention_layernorm.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.self_attn.q_proj.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.self_attn.k_proj.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.self_attn.v_proj.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.self_attn.o_proj.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.self_attn.rotary_emb.inv_freq": "pytorch_model-00012-of-00053.bin", "model.layers.11.mlp.gate_proj.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.mlp.up_proj.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.mlp.down_proj.weight": "pytorch_model-00012-of-00053.bin", "model.layers.12.input_layernorm.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.post_attention_layernorm.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.self_attn.q_proj.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.self_attn.k_proj.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.self_attn.v_proj.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.self_attn.o_proj.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.self_attn.rotary_emb.inv_freq": "pytorch_model-00013-of-00053.bin", "model.layers.12.mlp.gate_proj.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.mlp.up_proj.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.mlp.down_proj.weight": "pytorch_model-00013-of-00053.bin", "model.layers.13.input_layernorm.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.post_attention_layernorm.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.self_attn.q_proj.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.self_attn.k_proj.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.self_attn.v_proj.weight": 
"pytorch_model-00014-of-00053.bin", "model.layers.13.self_attn.o_proj.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.self_attn.rotary_emb.inv_freq": "pytorch_model-00014-of-00053.bin", "model.layers.13.mlp.gate_proj.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.mlp.up_proj.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.mlp.down_proj.weight": "pytorch_model-00014-of-00053.bin", "model.layers.14.input_layernorm.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.post_attention_layernorm.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.self_attn.q_proj.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.self_attn.k_proj.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.self_attn.v_proj.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.self_attn.o_proj.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.self_attn.rotary_emb.inv_freq": "pytorch_model-00015-of-00053.bin", "model.layers.14.mlp.gate_proj.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.mlp.up_proj.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.mlp.down_proj.weight": "pytorch_model-00015-of-00053.bin", "model.layers.15.input_layernorm.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.post_attention_layernorm.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.self_attn.q_proj.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.self_attn.k_proj.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.self_attn.v_proj.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.self_attn.o_proj.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.self_attn.rotary_emb.inv_freq": "pytorch_model-00016-of-00053.bin", "model.layers.15.mlp.gate_proj.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.mlp.up_proj.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.mlp.down_proj.weight": "pytorch_model-00016-of-00053.bin", "model.layers.16.input_layernorm.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.post_attention_layernorm.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.self_attn.q_proj.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.self_attn.k_proj.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.self_attn.v_proj.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.self_attn.o_proj.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.self_attn.rotary_emb.inv_freq": "pytorch_model-00017-of-00053.bin", "model.layers.16.mlp.gate_proj.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.mlp.up_proj.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.mlp.down_proj.weight": "pytorch_model-00017-of-00053.bin", "model.layers.17.input_layernorm.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.post_attention_layernorm.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.self_attn.q_proj.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.self_attn.k_proj.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.self_attn.v_proj.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.self_attn.o_proj.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.self_attn.rotary_emb.inv_freq": "pytorch_model-00018-of-00053.bin", "model.layers.17.mlp.gate_proj.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.mlp.up_proj.weight": "pytorch_model-00018-of-00053.bin", 
"model.layers.17.mlp.down_proj.weight": "pytorch_model-00018-of-00053.bin", "model.layers.18.input_layernorm.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.post_attention_layernorm.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.self_attn.q_proj.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.self_attn.k_proj.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.self_attn.v_proj.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.self_attn.o_proj.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.self_attn.rotary_emb.inv_freq": "pytorch_model-00019-of-00053.bin", "model.layers.18.mlp.gate_proj.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.mlp.up_proj.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.mlp.down_proj.weight": "pytorch_model-00019-of-00053.bin", "model.layers.19.input_layernorm.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.post_attention_layernorm.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.self_attn.q_proj.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.self_attn.k_proj.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.self_attn.v_proj.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.self_attn.o_proj.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.self_attn.rotary_emb.inv_freq": "pytorch_model-00020-of-00053.bin", "model.layers.19.mlp.gate_proj.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.mlp.up_proj.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.mlp.down_proj.weight": "pytorch_model-00020-of-00053.bin", "model.layers.20.input_layernorm.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.post_attention_layernorm.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.self_attn.q_proj.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.self_attn.k_proj.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.self_attn.v_proj.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.self_attn.o_proj.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.self_attn.rotary_emb.inv_freq": "pytorch_model-00021-of-00053.bin", "model.layers.20.mlp.gate_proj.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.mlp.up_proj.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.mlp.down_proj.weight": "pytorch_model-00021-of-00053.bin", "model.layers.21.input_layernorm.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.post_attention_layernorm.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.self_attn.q_proj.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.self_attn.k_proj.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.self_attn.v_proj.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.self_attn.o_proj.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.self_attn.rotary_emb.inv_freq": "pytorch_model-00022-of-00053.bin", "model.layers.21.mlp.gate_proj.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.mlp.up_proj.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.mlp.down_proj.weight": "pytorch_model-00022-of-00053.bin", "model.layers.22.input_layernorm.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.post_attention_layernorm.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.self_attn.q_proj.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.self_attn.k_proj.weight": 
"pytorch_model-00023-of-00053.bin", "model.layers.22.self_attn.v_proj.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.self_attn.o_proj.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.self_attn.rotary_emb.inv_freq": "pytorch_model-00023-of-00053.bin", "model.layers.22.mlp.gate_proj.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.mlp.up_proj.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.mlp.down_proj.weight": "pytorch_model-00023-of-00053.bin", "model.layers.23.input_layernorm.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.post_attention_layernorm.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.self_attn.q_proj.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.self_attn.k_proj.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.self_attn.v_proj.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.self_attn.o_proj.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.self_attn.rotary_emb.inv_freq": "pytorch_model-00024-of-00053.bin", "model.layers.23.mlp.gate_proj.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.mlp.up_proj.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.mlp.down_proj.weight": "pytorch_model-00024-of-00053.bin", "model.layers.24.input_layernorm.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.post_attention_layernorm.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.self_attn.q_proj.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.self_attn.k_proj.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.self_attn.v_proj.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.self_attn.o_proj.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.self_attn.rotary_emb.inv_freq": "pytorch_model-00025-of-00053.bin", "model.layers.24.mlp.gate_proj.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.mlp.up_proj.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.mlp.down_proj.weight": "pytorch_model-00025-of-00053.bin", "model.layers.25.input_layernorm.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.post_attention_layernorm.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.self_attn.q_proj.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.self_attn.k_proj.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.self_attn.v_proj.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.self_attn.o_proj.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.self_attn.rotary_emb.inv_freq": "pytorch_model-00026-of-00053.bin", "model.layers.25.mlp.gate_proj.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.mlp.up_proj.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.mlp.down_proj.weight": "pytorch_model-00026-of-00053.bin", "model.layers.26.input_layernorm.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.post_attention_layernorm.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.self_attn.q_proj.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.self_attn.k_proj.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.self_attn.v_proj.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.self_attn.o_proj.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.self_attn.rotary_emb.inv_freq": "pytorch_model-00027-of-00053.bin", "model.layers.26.mlp.gate_proj.weight": "pytorch_model-00027-of-00053.bin", 
"model.layers.26.mlp.up_proj.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.mlp.down_proj.weight": "pytorch_model-00027-of-00053.bin", "model.layers.27.input_layernorm.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.post_attention_layernorm.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.self_attn.q_proj.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.self_attn.k_proj.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.self_attn.v_proj.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.self_attn.o_proj.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.self_attn.rotary_emb.inv_freq": "pytorch_model-00028-of-00053.bin", "model.layers.27.mlp.gate_proj.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.mlp.up_proj.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.mlp.down_proj.weight": "pytorch_model-00028-of-00053.bin", "model.layers.28.input_layernorm.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.post_attention_layernorm.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.self_attn.q_proj.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.self_attn.k_proj.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.self_attn.v_proj.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.self_attn.o_proj.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.self_attn.rotary_emb.inv_freq": "pytorch_model-00029-of-00053.bin", "model.layers.28.mlp.gate_proj.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.mlp.up_proj.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.mlp.down_proj.weight": "pytorch_model-00029-of-00053.bin", "model.layers.29.input_layernorm.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.post_attention_layernorm.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.self_attn.q_proj.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.self_attn.k_proj.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.self_attn.v_proj.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.self_attn.o_proj.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.self_attn.rotary_emb.inv_freq": "pytorch_model-00030-of-00053.bin", "model.layers.29.mlp.gate_proj.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.mlp.up_proj.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.mlp.down_proj.weight": "pytorch_model-00030-of-00053.bin", "model.layers.30.input_layernorm.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.post_attention_layernorm.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.self_attn.q_proj.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.self_attn.k_proj.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.self_attn.v_proj.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.self_attn.o_proj.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.self_attn.rotary_emb.inv_freq": "pytorch_model-00031-of-00053.bin", "model.layers.30.mlp.gate_proj.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.mlp.up_proj.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.mlp.down_proj.weight": "pytorch_model-00031-of-00053.bin", "model.layers.31.input_layernorm.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.post_attention_layernorm.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.self_attn.q_proj.weight": 
"pytorch_model-00032-of-00053.bin", "model.layers.31.self_attn.k_proj.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.self_attn.v_proj.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.self_attn.o_proj.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.self_attn.rotary_emb.inv_freq": "pytorch_model-00032-of-00053.bin", "model.layers.31.mlp.gate_proj.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.mlp.up_proj.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.mlp.down_proj.weight": "pytorch_model-00032-of-00053.bin", "model.layers.32.input_layernorm.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.post_attention_layernorm.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.self_attn.q_proj.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.self_attn.k_proj.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.self_attn.v_proj.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.self_attn.o_proj.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.self_attn.rotary_emb.inv_freq": "pytorch_model-00033-of-00053.bin", "model.layers.32.mlp.gate_proj.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.mlp.up_proj.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.mlp.down_proj.weight": "pytorch_model-00033-of-00053.bin", "model.layers.33.input_layernorm.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.post_attention_layernorm.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.self_attn.q_proj.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.self_attn.k_proj.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.self_attn.v_proj.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.self_attn.o_proj.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.self_attn.rotary_emb.inv_freq": "pytorch_model-00034-of-00053.bin", "model.layers.33.mlp.gate_proj.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.mlp.up_proj.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.mlp.down_proj.weight": "pytorch_model-00034-of-00053.bin", "model.layers.34.input_layernorm.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.post_attention_layernorm.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.self_attn.q_proj.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.self_attn.k_proj.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.self_attn.v_proj.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.self_attn.o_proj.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.self_attn.rotary_emb.inv_freq": "pytorch_model-00035-of-00053.bin", "model.layers.34.mlp.gate_proj.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.mlp.up_proj.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.mlp.down_proj.weight": "pytorch_model-00035-of-00053.bin", "model.layers.35.input_layernorm.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.post_attention_layernorm.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.self_attn.q_proj.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.self_attn.k_proj.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.self_attn.v_proj.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.self_attn.o_proj.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.self_attn.rotary_emb.inv_freq": "pytorch_model-00036-of-00053.bin", 
"model.layers.35.mlp.gate_proj.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.mlp.up_proj.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.mlp.down_proj.weight": "pytorch_model-00036-of-00053.bin", "model.layers.36.input_layernorm.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.post_attention_layernorm.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.self_attn.q_proj.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.self_attn.k_proj.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.self_attn.v_proj.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.self_attn.o_proj.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.self_attn.rotary_emb.inv_freq": "pytorch_model-00037-of-00053.bin", "model.layers.36.mlp.gate_proj.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.mlp.up_proj.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.mlp.down_proj.weight": "pytorch_model-00037-of-00053.bin", "model.layers.37.input_layernorm.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.post_attention_layernorm.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.self_attn.q_proj.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.self_attn.k_proj.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.self_attn.v_proj.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.self_attn.o_proj.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.self_attn.rotary_emb.inv_freq": "pytorch_model-00038-of-00053.bin", "model.layers.37.mlp.gate_proj.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.mlp.up_proj.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.mlp.down_proj.weight": "pytorch_model-00038-of-00053.bin", "model.layers.38.input_layernorm.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.post_attention_layernorm.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.self_attn.q_proj.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.self_attn.k_proj.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.self_attn.v_proj.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.self_attn.o_proj.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.self_attn.rotary_emb.inv_freq": "pytorch_model-00039-of-00053.bin", "model.layers.38.mlp.gate_proj.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.mlp.up_proj.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.mlp.down_proj.weight": "pytorch_model-00039-of-00053.bin", "model.layers.39.input_layernorm.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.post_attention_layernorm.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.self_attn.q_proj.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.self_attn.k_proj.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.self_attn.v_proj.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.self_attn.o_proj.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.self_attn.rotary_emb.inv_freq": "pytorch_model-00040-of-00053.bin", "model.layers.39.mlp.gate_proj.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.mlp.up_proj.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.mlp.down_proj.weight": "pytorch_model-00040-of-00053.bin", "model.layers.40.input_layernorm.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.post_attention_layernorm.weight": 
"pytorch_model-00041-of-00053.bin", "model.layers.40.self_attn.q_proj.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.self_attn.k_proj.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.self_attn.v_proj.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.self_attn.o_proj.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.self_attn.rotary_emb.inv_freq": "pytorch_model-00041-of-00053.bin", "model.layers.40.mlp.gate_proj.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.mlp.up_proj.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.mlp.down_proj.weight": "pytorch_model-00041-of-00053.bin", "model.layers.41.input_layernorm.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.post_attention_layernorm.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.self_attn.q_proj.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.self_attn.k_proj.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.self_attn.v_proj.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.self_attn.o_proj.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.self_attn.rotary_emb.inv_freq": "pytorch_model-00042-of-00053.bin", "model.layers.41.mlp.gate_proj.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.mlp.up_proj.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.mlp.down_proj.weight": "pytorch_model-00042-of-00053.bin", "model.layers.42.input_layernorm.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.post_attention_layernorm.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.self_attn.q_proj.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.self_attn.k_proj.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.self_attn.v_proj.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.self_attn.o_proj.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.self_attn.rotary_emb.inv_freq": "pytorch_model-00043-of-00053.bin", "model.layers.42.mlp.gate_proj.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.mlp.up_proj.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.mlp.down_proj.weight": "pytorch_model-00043-of-00053.bin", "model.layers.43.input_layernorm.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.post_attention_layernorm.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.self_attn.q_proj.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.self_attn.k_proj.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.self_attn.v_proj.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.self_attn.o_proj.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.self_attn.rotary_emb.inv_freq": "pytorch_model-00044-of-00053.bin", "model.layers.43.mlp.gate_proj.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.mlp.up_proj.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.mlp.down_proj.weight": "pytorch_model-00044-of-00053.bin", "model.layers.44.input_layernorm.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.post_attention_layernorm.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.self_attn.q_proj.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.self_attn.k_proj.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.self_attn.v_proj.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.self_attn.o_proj.weight": "pytorch_model-00045-of-00053.bin", 
"model.layers.44.self_attn.rotary_emb.inv_freq": "pytorch_model-00045-of-00053.bin", "model.layers.44.mlp.gate_proj.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.mlp.up_proj.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.mlp.down_proj.weight": "pytorch_model-00045-of-00053.bin", "model.layers.45.input_layernorm.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.post_attention_layernorm.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.self_attn.q_proj.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.self_attn.k_proj.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.self_attn.v_proj.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.self_attn.o_proj.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.self_attn.rotary_emb.inv_freq": "pytorch_model-00046-of-00053.bin", "model.layers.45.mlp.gate_proj.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.mlp.up_proj.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.mlp.down_proj.weight": "pytorch_model-00046-of-00053.bin", "model.layers.46.input_layernorm.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.post_attention_layernorm.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.self_attn.q_proj.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.self_attn.k_proj.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.self_attn.v_proj.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.self_attn.o_proj.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.self_attn.rotary_emb.inv_freq": "pytorch_model-00047-of-00053.bin", "model.layers.46.mlp.gate_proj.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.mlp.up_proj.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.mlp.down_proj.weight": "pytorch_model-00047-of-00053.bin", "model.layers.47.input_layernorm.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.post_attention_layernorm.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.self_attn.q_proj.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.self_attn.k_proj.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.self_attn.v_proj.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.self_attn.o_proj.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.self_attn.rotary_emb.inv_freq": "pytorch_model-00048-of-00053.bin", "model.layers.47.mlp.gate_proj.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.mlp.up_proj.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.mlp.down_proj.weight": "pytorch_model-00048-of-00053.bin", "model.layers.48.input_layernorm.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.post_attention_layernorm.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.self_attn.q_proj.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.self_attn.k_proj.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.self_attn.v_proj.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.self_attn.o_proj.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.self_attn.rotary_emb.inv_freq": "pytorch_model-00049-of-00053.bin", "model.layers.48.mlp.gate_proj.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.mlp.up_proj.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.mlp.down_proj.weight": "pytorch_model-00049-of-00053.bin", "model.layers.49.input_layernorm.weight": 
"pytorch_model-00050-of-00053.bin", "model.layers.49.post_attention_layernorm.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.self_attn.q_proj.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.self_attn.k_proj.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.self_attn.v_proj.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.self_attn.o_proj.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.self_attn.rotary_emb.inv_freq": "pytorch_model-00050-of-00053.bin", "model.layers.49.mlp.gate_proj.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.mlp.up_proj.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.mlp.down_proj.weight": "pytorch_model-00050-of-00053.bin", "model.layers.50.input_layernorm.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.post_attention_layernorm.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.self_attn.q_proj.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.self_attn.k_proj.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.self_attn.v_proj.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.self_attn.o_proj.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.self_attn.rotary_emb.inv_freq": "pytorch_model-00051-of-00053.bin", "model.layers.50.mlp.gate_proj.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.mlp.up_proj.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.mlp.down_proj.weight": "pytorch_model-00051-of-00053.bin", "model.layers.51.input_layernorm.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.post_attention_layernorm.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.self_attn.q_proj.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.self_attn.k_proj.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.self_attn.v_proj.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.self_attn.o_proj.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.self_attn.rotary_emb.inv_freq": "pytorch_model-00052-of-00053.bin", "model.layers.51.mlp.gate_proj.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.mlp.up_proj.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.mlp.down_proj.weight": "pytorch_model-00052-of-00053.bin", "model.norm.weight": "pytorch_model-00053-of-00053.bin", "model.embed_tokens.weight": "pytorch_model-00053-of-00053.bin", "lm_head.weight": "pytorch_model-00053-of-00053.bin"}}
\ No newline at end of file
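The index above is how transformers resolves a sharded checkpoint: "metadata.total_size" records the combined payload (27,708,239,872 bytes, roughly 27.7 GB), and "weight_map" sends each parameter name to one of the 53 shard files. A minimal sketch of fetching a single tensor through the map, assuming the shards have already been pulled from LFS and paths are relative to the repo root:

import json
import torch

with open("pytorch_model.bin.index.json") as f:
    index = json.load(f)

# Look up which shard holds the weight, then load only that shard.
name = "model.layers.0.self_attn.q_proj.weight"
shard_file = index["weight_map"][name]              # "pytorch_model-00001-of-00053.bin"
tensor = torch.load(shard_file, map_location="cpu")[name]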
diff --git a/special_tokens_map.json b/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..d85ba6cb6820b01226ef8bd40b46bb489041c6a8
--- /dev/null
+++ b/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
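These special-token definitions pair with the custom tokenizer class added below; because that class ships inside the repository rather than in transformers itself, loading it requires trust_remote_code. A hedged usage sketch, with a placeholder repository id:

from transformers import AutoTokenizer

# Placeholder repo id; substitute the actual Skywork model repository.
tok = AutoTokenizer.from_pretrained("Skywork/<model-repo>", trust_remote_code=True)
ids = tok("hello").input_ids  # starts with <s>, since add_bos_token defaults to True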
diff --git a/tokenization_skywork.py b/tokenization_skywork.py
new file mode 100644
index 0000000000000000000000000000000000000000..b0b40bff32793305bee9efb40ef27c15372aef34
--- /dev/null
+++ b/tokenization_skywork.py
@@ -0,0 +1,267 @@
+# coding=utf-8
+# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tokenization classes for Skywork."""
+import os
+from shutil import copyfile
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
+
+import sentencepiece as spm
+
+from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
+from transformers.utils import logging
+
+if TYPE_CHECKING:
+ from transformers.pipelines.conversational import Conversation
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}
+
+
+SPIECE_UNDERLINE = "▁"
+
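+# Llama-2-style chat markup: [INST]/[/INST] wrap user instructions; <<SYS>> tags delimit the system prompt.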
+B_INST, E_INST = "[INST]", "[/INST]"
+B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
+
+DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure \
+that your responses are socially unbiased and positive in nature.
+
+If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."""
+
+class SkyworkTokenizer(PreTrainedTokenizer):
+
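+ # A thin SentencePiece-backed tokenizer (tokenizer.model), closely following the Llama tokenizer.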
+ vocab_files_names = VOCAB_FILES_NAMES
+ # pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ # max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+ model_input_names = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ vocab_file,
+ unk_token="",
+ bos_token="",
+ eos_token="",
+ pad_token=None,
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
+ add_bos_token=True,
+ add_eos_token=False,
+ clean_up_tokenization_spaces=False,
+ legacy=True,
+ **kwargs,
+ ):
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+ bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
+ eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
+ unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
+ pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
+ self.legacy = legacy
+ self.vocab_file = vocab_file
+ self.add_bos_token = add_bos_token
+ self.add_eos_token = add_eos_token
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(vocab_file)
+ super().__init__(
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ pad_token=pad_token,
+ add_bos_token=add_bos_token,
+ add_eos_token=add_eos_token,
+ sp_model_kwargs=self.sp_model_kwargs,
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+ legacy=legacy,
+ **kwargs,
+ )
+ if legacy:
+ logger.warning_once(
+ f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. "
+ )
+
+
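+ # SentencePieceProcessor objects are not picklable, so pickling round-trips the serialized model proto instead.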
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ state["sp_model"] = None
+ state["sp_model_proto"] = self.sp_model.serialized_model_proto()
+ return state
+
+ def __setstate__(self, d):
+ self.__dict__ = d
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
+
+ @property
+ def vocab_size(self):
+ """Returns vocab size"""
+ return self.sp_model.get_piece_size()
+
+ def get_vocab(self):
+ """Returns vocab as a dict"""
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+
+ # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.tokenize
+ def tokenize(self, text, **kwargs) -> List[str]:
+ # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
+ # the beginning of the text
+ if not self.legacy:
+ text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
+ return super().tokenize(text, **kwargs)
+
+ # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer._tokenize
+ def _tokenize(self, text):
+ if not self.legacy:
+ is_first = text.startswith(SPIECE_UNDERLINE)
+ if is_first:
+ text = text[1:]
+
+ tokens = self.sp_model.encode(text, out_type=str)
+
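+ # Non-legacy mode: strip the extra leading SPIECE_UNDERLINE that SentencePiece emits when the original text did not start with whitespace.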
+ if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
+ tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
+ return tokens
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.sp_model.piece_to_id(token)
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ token = self.sp_model.IdToPiece(index)
+ return token
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ current_sub_tokens = []
+ out_string = ""
+ prev_is_special = False
+ for i, token in enumerate(tokens):
+ # make sure that special tokens are not decoded using sentencepiece model
+ if token in self.all_special_tokens:
+ if not prev_is_special and i != 0:
+ out_string += " "
+ out_string += self.sp_model.decode(current_sub_tokens) + token
+ prev_is_special = True
+ current_sub_tokens = []
+ else:
+ current_sub_tokens.append(token)
+ prev_is_special = False
+ out_string += self.sp_model.decode(current_sub_tokens)
+ return out_string
+
+ def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ out_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+
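+ # Prefer copying the original tokenizer.model from disk; otherwise fall back to dumping the in-memory serialized proto.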
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+ copyfile(self.vocab_file, out_vocab_file)
+ elif not os.path.isfile(self.vocab_file):
+ with open(out_vocab_file, "wb") as fi:
+ content_spiece_model = self.sp_model.serialized_model_proto()
+ fi.write(content_spiece_model)
+
+ return (out_vocab_file,)
+
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
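+ # Surround each sequence with BOS/EOS ids according to the add_bos_token / add_eos_token flags.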
+ bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+ output = bos_token_id + token_ids_0 + eos_token_id
+
+ if token_ids_1 is not None:
+ output = output + bos_token_id + token_ids_1 + eos_token_id
+
+ return output
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
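+ # Returns 1 for positions holding special tokens (BOS/EOS) and 0 for regular sequence tokens.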
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ bos_token_id = [1] if self.add_bos_token else []
+ eos_token_id = [1] if self.add_eos_token else []
+
+ if token_ids_1 is None:
+ return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
+ return (
+ bos_token_id
+ + ([0] * len(token_ids_0))
+ + eos_token_id
+ + bos_token_id
+ + ([0] * len(token_ids_1))
+ + eos_token_id
+ )
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
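+ # Segment ids: 0 for the first sequence (and its specials), 1 for the second.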
+ bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+ output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)
+
+ if token_ids_1 is not None:
+ output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)
+
+ return output
+
+ def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
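+ # Conversations must strictly alternate user/assistant turns, starting with the user.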
+ dialogue = list(conversation.iter_texts())
+ if not all([is_user for is_user, msg in dialogue[::2]]) or not all(
+ [not is_user for is_user, msg in dialogue[1::2]]
+ ):
+ raise ValueError(
+ "The model only supports 'user' and 'assistant' roles, starting with user and alternating (u/a/u/a/u...)"
+ )
+
+ dialog_tokens: List[int] = []
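+ # Inject the default <<SYS>> system prompt into the first user message if none is present.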
+ if len(conversation.past_user_inputs) > 0:
+ if not conversation.past_user_inputs[0].startswith(B_SYS) or E_SYS not in conversation.past_user_inputs[0]:
+ conversation.past_user_inputs[0] = (
+ B_SYS + DEFAULT_SYSTEM_PROMPT + E_SYS + conversation.past_user_inputs[0]
+ )
+ elif not dialogue[0][1].startswith(B_SYS) or E_SYS not in dialogue[0][1]:
+ dialogue[0] = (dialogue[0][0], B_SYS + DEFAULT_SYSTEM_PROMPT + E_SYS + dialogue[0][1])
+
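+ # Encode each (user, assistant) pair as "<s> [INST] user [/INST] assistant </s>", concatenated in order.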
+ dialog_tokens += sum(
+ [
+ [self.bos_token_id]
+ + self.encode(
+ f"{B_INST} {(prompt[1]).strip()} {E_INST} {(answer[1]).strip()} ", add_special_tokens=False
+ )
+ + [self.eos_token_id]
+ for prompt, answer in zip(dialogue[::2], dialogue[1::2])
+ ],
+ [],
+ )
+ if not dialogue[-1][0]:
+ raise ValueError("Last message must be from the user, but the final turn is from the assistant")
+ dialog_tokens += [self.bos_token_id] + self.encode(
+ f"{B_INST} {(dialogue[-1][1]).strip()} {E_INST}", add_special_tokens=False
+ )
+ return dialog_tokens
diff --git a/tokenizer.model b/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..decbfe220922d6a38ff52541ef3927b97fb7893e
--- /dev/null
+++ b/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:36ec9a4d6fd7cc78fbb9e4afd89fb04cba0381b08a842ca0b60826073821f594
+size 994250
diff --git a/tokenizer_config.json b/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..9c232b8b78a3ad2ce894b9a17628f3821627ccd7
--- /dev/null
+++ b/tokenizer_config.json
@@ -0,0 +1,40 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "bos_token": {
+ "__type": "AddedToken",
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "clean_up_tokenization_spaces": false,
+ "eos_token": {
+ "__type": "AddedToken",
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "legacy": true,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": null,
+ "sp_model_kwargs": {},
+ "tokenizer_class": "SkyworkTokenizer",
+ "unk_token": {
+ "__type": "AddedToken",
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "auto_map": {
+ "AutoTokenizer": [
+ "tokenization_skywork.SkyworkTokenizer",
+ null
+ ]
+ }
+}