diff --git a/config.json b/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..6d758dc655139f6bb0d76cf63b1b4f819ac6802e
--- /dev/null
+++ b/config.json
@@ -0,0 +1,30 @@
+{
+  "_name_or_path": "/DATA4T/text-generation-webui/models/yi-sex",
+  "architectures": [
+    "YiForCausalLM"
+  ],
+  "auto_map": {
+    "AutoConfig": "configuration_yi.YiConfig",
+    "AutoModel": "modeling_yi.YiForCausalLM",
+    "AutoModelForCausalLM": "modeling_yi.YiForCausalLM"
+  },
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "hidden_act": "silu",
+  "hidden_size": 7168,
+  "initializer_range": 0.02,
+  "intermediate_size": 20480,
+  "max_position_embeddings": 4096,
+  "model_type": "Yi",
+  "num_attention_heads": 56,
+  "num_hidden_layers": 60,
+  "num_key_value_heads": 8,
+  "pad_token_id": 0,
+  "rms_norm_eps": 1e-05,
+  "rope_theta": 5000000.0,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.34.1",
+  "use_cache": true,
+  "vocab_size": 64000
+}
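Since `config.json` routes the `Auto*` classes to the bundled `configuration_yi.py`/`modeling_yi.py` via `auto_map`, loading this checkpoint goes through `trust_remote_code`. A minimal loading sketch, assuming a local checkout of this repo at the hypothetical path `./yi-model` and a transformers version close to the `4.34.1` recorded above:

```python
import torch
from transformers import AutoModelForCausalLM

# `auto_map` resolves AutoModelForCausalLM to modeling_yi.YiForCausalLM,
# so trust_remote_code=True is required for the custom classes to load.
model = AutoModelForCausalLM.from_pretrained(
    "./yi-model",                # hypothetical local path to this repo
    torch_dtype=torch.bfloat16,  # matches "torch_dtype" in config.json
    trust_remote_code=True,
    device_map="auto",           # optional; needs `accelerate` to spread the 77 shards
)
```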
diff --git a/configuration_yi.py b/configuration_yi.py
new file mode 100644
index 0000000000000000000000000000000000000000..73a370b5b90151cf199773fad7491c62ef412fda
--- /dev/null
+++ b/configuration_yi.py
@@ -0,0 +1,121 @@
+""" Yi model configuration"""
+from transformers.configuration_utils import PretrainedConfig
+from transformers.utils import logging
+
+logger = logging.get_logger(__name__)
+
+Yi_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
+
+
+class YiConfig(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`YiModel`]. It is used to instantiate a Yi
+    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+    defaults will yield a similar configuration to that of the Yi model.
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+
+    Args:
+        vocab_size (`int`, *optional*, defaults to 64000):
+            Vocabulary size of the Yi model. Defines the number of different tokens that can be represented by the
+            `input_ids` passed when calling [`YiModel`].
+        hidden_size (`int`, *optional*, defaults to 4096):
+            Dimension of the hidden representations.
+        intermediate_size (`int`, *optional*, defaults to 11008):
+            Dimension of the MLP representations.
+        num_hidden_layers (`int`, *optional*, defaults to 32):
+            Number of hidden layers in the Transformer decoder.
+        num_attention_heads (`int`, *optional*, defaults to 32):
+            Number of attention heads for each attention layer in the Transformer decoder.
+        num_key_value_heads (`int`, *optional*, defaults to 4):
+            This is the number of key_value heads that should be used to implement Grouped Query Attention (GQA). If
+            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
+            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+            by meanpooling all the original heads within that group. For more details check out [this
+            paper](https://arxiv.org/pdf/2305.13245.pdf). If set to `None`, it will default to
+            `num_attention_heads`.
+        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+            The non-linear activation function (function or string) in the decoder.
+        max_position_embeddings (`int`, *optional*, defaults to 4096):
+            The maximum sequence length that this model might ever be used with. Typically set this to something large
+            just in case (e.g., 2048 or 4096).
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        rms_norm_eps (`float`, *optional*, defaults to 1e-5):
+            The epsilon used by the rms normalization layers.
+        use_cache (`bool`, *optional*, defaults to `True`):
+            Whether or not the model should return the last key/values attentions (not used by all models). Only
+            relevant if `config.is_decoder=True`.
+        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+            Whether to tie the input and output word embeddings.
+        output_attentions (`bool`, *optional*, defaults to `False`):
+            Whether or not to output attentions.
+        rope_theta (`float`, *optional*, defaults to 5000000.0):
+            The base period of the RoPE embeddings.
+
+    Example:
+
+    ```python
+    >>> from transformers import YiModel, YiConfig
+
+    >>> # Initializing a Yi style configuration
+    >>> configuration = YiConfig()
+
+    >>> # Initializing a model from the Yi style configuration
+    >>> model = YiModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+    model_type = "Yi"
+    keys_to_ignore_at_inference = ["past_key_values"]
+
+    def __init__(
+        self,
+        vocab_size=64000,
+        hidden_size=4096,
+        intermediate_size=11008,
+        num_hidden_layers=32,
+        num_attention_heads=32,
+        num_key_value_heads=4,
+        hidden_act="silu",
+        max_position_embeddings=4096,
+        initializer_range=0.02,
+        rms_norm_eps=1e-5,
+        use_cache=True,
+        pad_token_id=0,
+        bos_token_id=1,
+        eos_token_id=2,
+        tie_word_embeddings=False,
+        output_attentions=False,
+        rope_theta=5000000.0,
+        **kwargs,
+    ):
+        self.vocab_size = vocab_size
+        self.max_position_embeddings = max_position_embeddings
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+
+        # for backward compatibility
+        if num_key_value_heads is None:
+            num_key_value_heads = num_attention_heads
+
+        self.num_key_value_heads = num_key_value_heads
+        self.hidden_act = hidden_act
+        self.initializer_range = initializer_range
+        self.rms_norm_eps = rms_norm_eps
+        self.use_cache = use_cache
+        self.output_attentions = output_attentions
+        self.rope_theta = rope_theta
+
+        super().__init__(
+            pad_token_id=pad_token_id,
+            bos_token_id=bos_token_id,
+            eos_token_id=eos_token_id,
+            tie_word_embeddings=tie_word_embeddings,
+            **kwargs,
+        )
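The docstring's remark that a multi-head checkpoint is converted to a GQA one by mean-pooling each group's key/value heads can be made concrete. A small sketch under assumed shapes (the head counts below match this checkpoint's `config.json`; the projection tensor itself is random, for illustration only):

```python
import torch

n_heads, n_kv_heads, head_dim, hidden = 56, 8, 128, 7168  # head_dim = 7168 / 56
group = n_heads // n_kv_heads  # 7 query heads share each key/value head

# A fictitious MHA-style key projection, viewed per head.
k_proj_mha = torch.randn(n_heads, head_dim, hidden)

# GQA conversion per the cited paper: mean-pool the heads within each group.
k_proj_gqa = k_proj_mha.view(n_kv_heads, group, head_dim, hidden).mean(dim=1)
print(k_proj_gqa.shape)  # torch.Size([8, 128, 7168])
```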
diff --git a/generation_config.json b/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c26d216dc64711b76750a0d69db303a23484a49b
--- /dev/null
+++ b/generation_config.json
@@ -0,0 +1,7 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "pad_token_id": 0,
+  "transformers_version": "4.34.1"
+}
diff --git a/modeling_yi.py b/modeling_yi.py
new file mode 100644
index 0000000000000000000000000000000000000000..aacbca5e2631ae0fa4aea0b21b812c998b05bf5f
--- /dev/null
+++ b/modeling_yi.py
@@ -0,0 +1,1030 @@
+""" PyTorch Yi model."""
+import math
+from typing import List, Optional, Tuple, Union
+
+import torch.utils.checkpoint
+from einops import repeat
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from transformers.activations import ACT2FN
+from transformers.modeling_outputs import (
+    BaseModelOutputWithPast,
+    CausalLMOutputWithPast,
+    SequenceClassifierOutputWithPast,
+)
+from transformers.modeling_utils import PreTrainedModel
+from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
+from transformers.utils import (
+    add_start_docstrings,
+    add_start_docstrings_to_model_forward,
+    is_flash_attn_available,
+    logging,
+    replace_return_docstrings,
+)
+
+from .configuration_yi import YiConfig
+
+
+if is_flash_attn_available():
+    from flash_attn import flash_attn_func
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "YiConfig"
+
+
+# Copied from transformers.models.bart.modeling_bart._make_causal_mask
+def _make_causal_mask(
+    input_ids_shape: torch.Size,
+    dtype: torch.dtype,
+    device: torch.device,
+    past_key_values_length: int = 0,
+):
+    """
+    Make the causal mask used for unidirectional (decoder) self-attention.
+    """
+    bsz, tgt_len = input_ids_shape
+    mask = torch.full(
+        (tgt_len, tgt_len),
+        torch.tensor(torch.finfo(dtype).min, device=device),
+        device=device,
+    )
+    mask_cond = torch.arange(mask.size(-1), device=device)
+    mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
+    mask = mask.to(dtype)
+
+    if past_key_values_length > 0:
+        mask = torch.cat(
+            [
+                torch.zeros(
+                    tgt_len, past_key_values_length, dtype=dtype, device=device
+                ),
+                mask,
+            ],
+            dim=-1,
+        )
+    return mask[None, None, :, :].expand(
+        bsz, 1, tgt_len, tgt_len + past_key_values_length
+    )
+
+
+# Copied from transformers.models.bart.modeling_bart._expand_mask
+def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
+    """
+    Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
+    """
+    bsz, src_len = mask.size()
+    tgt_len = tgt_len if tgt_len is not None else src_len
+
+    expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
+
+    inverted_mask = 1.0 - expanded_mask
+
+    return inverted_mask.masked_fill(
+        inverted_mask.to(torch.bool), torch.finfo(dtype).min
+    )
+
+
+class YiRMSNorm(nn.Module):
+    def __init__(self, hidden_size, eps=1e-5):
+        """
+        YiRMSNorm is equivalent to T5LayerNorm
+        """
+        super().__init__()
+        self.weight = nn.Parameter(torch.ones(hidden_size))
+        self.variance_epsilon = eps
+
+    def forward(self, hidden_states):
+        input_dtype = hidden_states.dtype
+        hidden_states = hidden_states.to(torch.float32)
+        variance = hidden_states.pow(2).mean(-1, keepdim=True)
+        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+
+        return self.weight * hidden_states.to(input_dtype)
+
+
+ALL_LAYERNORM_LAYERS.append(YiRMSNorm)
+
+
+class YiRotaryEmbedding(torch.nn.Module):
+    def __init__(self, dim, max_position_embeddings=4096, base=5000000, device=None):
+        super().__init__()
+
+        self.dim = dim
+        self.max_position_embeddings = max_position_embeddings
+        self.base = base
+
+        # Build here to make `torch.jit.trace` work.
+ self._set_cos_sin_cache(seq_len=max_position_embeddings, device=device) + + def _set_cos_sin_cache(self, seq_len, device): + self.max_seq_len_cached = seq_len + inv_freq = 1.0 / ( + self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim) + ) + t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.float32) + freqs = torch.einsum("i,j->ij", t, inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer( + "cos_cached", emb.cos()[None, None, :, :], persistent=False + ) + self.register_buffer( + "sin_cached", emb.sin()[None, None, :, :], persistent=False + ) + + def forward(self, x, seq_len=None): + # x: [bs, num_attention_heads, seq_len, head_size] + if seq_len > self.max_seq_len_cached: + self._set_cos_sin_cache(seq_len=seq_len, device=x.device) + + return ( + self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype), + self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype), + ) + + +def rotate_half(x): + """Rotates half the hidden dims of the input.""" + x1 = x[..., : x.shape[-1] // 2] + x2 = x[..., x.shape[-1] // 2 :] + return torch.cat((-x2, x1), dim=-1) + + +def apply_rotary_pos_emb(q, k, cos, sin, position_ids, flash_attn_available): + # The first two dimensions of cos and sin are always 1, so we can `squeeze` them. + cos = cos.squeeze(1).squeeze(0) # [seq_len, dim] + sin = sin.squeeze(1).squeeze(0) # [seq_len, dim] + expand_dim = 2 if flash_attn_available else 1 + cos = cos[position_ids].unsqueeze(expand_dim) # [bs, seq_len, dim] + sin = sin[position_ids].unsqueeze(expand_dim) # [bs, seq_len, dim] + q_embed = (q * cos) + (rotate_half(q) * sin) + k_embed = (k * cos) + (rotate_half(k) * sin) + return q_embed, k_embed + + +class YiMLP(nn.Module): + def __init__(self, hidden_size: int, intermediate_size: int, hidden_act: str): + super().__init__() + self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False) + self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False) + self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False) + self.act_fn = ACT2FN[hidden_act] + + def forward(self, x): + return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) + + +class YiAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__(self, config: YiConfig): + super().__init__() + self.config = config + self.hidden_size = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.hidden_size // self.num_heads + self.num_key_value_heads = config.num_key_value_heads + self.num_key_value_groups = self.num_heads // self.num_key_value_heads + self.max_position_embeddings = config.max_position_embeddings + + if (self.head_dim * self.num_heads) != self.hidden_size: + raise ValueError( + f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" + f" and `num_heads`: {self.num_heads})." 
+ ) + self.q_proj = nn.Linear( + self.hidden_size, self.num_heads * self.head_dim, bias=False + ) + self.k_proj = nn.Linear( + self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False + ) + self.v_proj = nn.Linear( + self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False + ) + self.o_proj = nn.Linear( + self.num_heads * self.head_dim, self.hidden_size, bias=False + ) + + self.rotary_emb = YiRotaryEmbedding( + self.head_dim, + max_position_embeddings=self.max_position_embeddings, + base=self.config.rope_theta, + ) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: bool = False, + use_cache: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + bsz, q_len, _ = hidden_states.size() + flash_attn_available = is_flash_attn_available() + + query_states = self.q_proj(hidden_states).view( + bsz, q_len, self.num_heads, self.head_dim + ) + + key_states = self.k_proj(hidden_states).view( + bsz, q_len, self.num_key_value_heads, self.head_dim + ) + value_states = self.v_proj(hidden_states).view( + bsz, q_len, self.num_key_value_heads, self.head_dim + ) + + if not flash_attn_available: + if self.num_key_value_groups > 1: + key_states = repeat( + key_states, f"b n h d -> b n (h {self.num_key_value_groups}) d" + ) + value_states = repeat( + value_states, f"b n h d -> b n (h {self.num_key_value_groups}) d" + ) + + # b n h d -> b h n d + query_states = query_states.transpose(1, 2) + key_states = key_states.transpose(1, 2) + value_states = value_states.transpose(1, 2) + + seq_dim = 1 if flash_attn_available else 2 + kv_seq_len = key_states.shape[seq_dim] + if past_key_value is not None: + kv_seq_len += past_key_value[0].shape[seq_dim] + cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) + query_states, key_states = apply_rotary_pos_emb( + query_states, key_states, cos, sin, position_ids, flash_attn_available + ) + + if past_key_value is not None: + # reuse k, v, self_attention + key_states = torch.cat([past_key_value[0], key_states], dim=seq_dim) + value_states = torch.cat([past_key_value[1], value_states], dim=seq_dim) + + past_key_value = (key_states, value_states) if use_cache else None + + if flash_attn_available: + attn_output = flash_attn_func( + query_states, key_states, value_states, dropout_p=0.0, causal=True + ) + else: + attn_weights = torch.matmul( + query_states, key_states.transpose(2, 3) + ) / math.sqrt(self.head_dim) + + if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): + raise ValueError( + f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is" + f" {attn_weights.size()}" + ) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is" + f"{attention_mask.size()}" + ) + attn_weights = attn_weights + attention_mask + dtype_min = torch.tensor( + torch.finfo(attn_weights.dtype).min, + device=attn_weights.device, + dtype=attn_weights.dtype, + ) + attn_weights = torch.max(attn_weights, dtype_min) + + # upcast attention to fp32 + attn_weights = nn.functional.softmax( + attn_weights, dim=-1, dtype=torch.float32 + ).to(query_states.dtype) + attn_output = torch.matmul(attn_weights, value_states) + + if attn_output.size() != (bsz, self.num_heads, q_len, 
self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + if not flash_attn_available: + attn_output = attn_output.transpose(1, 2) + + attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) + + attn_output = self.o_proj(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + +class YiDecoderLayer(nn.Module): + def __init__(self, config: YiConfig): + super().__init__() + + self.hidden_size = config.hidden_size + self.self_attn = YiAttention(config=config) + self.mlp = YiMLP( + hidden_size=self.hidden_size, + intermediate_size=config.intermediate_size, + hidden_act=config.hidden_act, + ) + + self.ln1 = YiRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + self.ln2 = YiRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = False, + ) -> Tuple[ + torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]] + ]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`, *optional*): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding + (see `past_key_values`). + past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states + """ + + residual = hidden_states + + hidden_states = self.ln1(hidden_states) + + # Self Attention + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + hidden_states = residual + hidden_states + + # Fully Connected + residual = hidden_states + hidden_states = self.ln2(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights,) + + if use_cache: + outputs += (present_key_value,) + + return outputs + + +Yi_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`YiConfig`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. 
Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + + +@add_start_docstrings( + "The bare Yi Model outputting raw hidden-states without any specific head on top.", + Yi_START_DOCSTRING, +) +class YiPreTrainedModel(PreTrainedModel): + config_class = YiConfig + base_model_prefix = "model" + supports_gradient_checkpointing = True + _no_split_modules = ["YiDecoderLayer"] + _skip_keys_device_placement = "past_key_values" + + def _init_weights(self, module): + std = self.config.initializer_range + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, YiModel): + module.gradient_checkpointing = value + + +Yi_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see + `past_key_values`). + + If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] + and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more + information on the default strategy. + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.n_positions - 1]`. + + [What are position IDs?](../glossary#position-ids) + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. 
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + "The bare Yi Model outputting raw hidden-states without any specific head on top.", + Yi_START_DOCSTRING, +) +class YiModel(YiPreTrainedModel): + """ + Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`YiDecoderLayer`] + + Args: + config: YiConfig + """ + + def __init__(self, config: YiConfig): + super().__init__(config) + self.padding_idx = config.pad_token_id + self.vocab_size = config.vocab_size + + self.embed_tokens = nn.Embedding( + config.vocab_size, config.hidden_size, self.padding_idx + ) + self.layers = nn.ModuleList( + [YiDecoderLayer(config) for _ in range(config.num_hidden_layers)] + ) + + self.norm = YiRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask + def _prepare_decoder_attention_mask( + self, attention_mask, input_ids, inputs_embeds, past_key_values_length + ): + input_shape = input_ids.shape + # create causal mask + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + combined_attention_mask = None + if input_shape[-1] > 1: + combined_attention_mask = _make_causal_mask( + input_shape, + inputs_embeds.dtype, + device=inputs_embeds.device, + past_key_values_length=past_key_values_length, + ) + + if attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + expanded_attn_mask = _expand_mask( + attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] + ).to(inputs_embeds.device) + combined_attention_mask = ( + expanded_attn_mask + if combined_attention_mask is None + else expanded_attn_mask + combined_attention_mask + ) + + return combined_attention_mask + + @add_start_docstrings_to_model_forward(Yi_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPast]: + output_attentions = ( + output_attentions + if 
output_attentions is not None
+            else self.config.output_attentions
+        )
+        output_hidden_states = (
+            output_hidden_states
+            if output_hidden_states is not None
+            else self.config.output_hidden_states
+        )
+        use_cache = use_cache if use_cache is not None else self.config.use_cache
+
+        return_dict = (
+            return_dict if return_dict is not None else self.config.use_return_dict
+        )
+
+        # retrieve input_ids and inputs_embeds
+        if input_ids is not None and inputs_embeds is not None:
+            raise ValueError(
+                "You cannot specify both input_ids and inputs_embeds at the same time"
+            )
+        elif input_ids is not None:
+            batch_size, seq_length = input_ids.shape
+        elif inputs_embeds is not None:
+            batch_size, seq_length, _ = inputs_embeds.shape
+        else:
+            raise ValueError(
+                "You have to specify either input_ids or inputs_embeds"
+            )
+
+        seq_length_with_past = seq_length
+        past_key_values_length = 0
+
+        if past_key_values is not None:
+            past_key_values_length = past_key_values[0][0].shape[2]
+            seq_length_with_past = seq_length_with_past + past_key_values_length
+
+        if position_ids is None:
+            device = input_ids.device if input_ids is not None else inputs_embeds.device
+            position_ids = torch.arange(
+                past_key_values_length,
+                seq_length + past_key_values_length,
+                dtype=torch.long,
+                device=device,
+            )
+            position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
+        else:
+            position_ids = position_ids.view(-1, seq_length).long()
+
+        if inputs_embeds is None:
+            inputs_embeds = self.embed_tokens(input_ids)
+
+        if not is_flash_attn_available():
+            # embed positions
+            if attention_mask is None:
+                attention_mask = torch.ones(
+                    (batch_size, seq_length_with_past),
+                    dtype=torch.bool,
+                    device=inputs_embeds.device,
+                )
+            attention_mask = self._prepare_decoder_attention_mask(
+                attention_mask,
+                input_ids,
+                inputs_embeds,
+                past_key_values_length,
+            )
+        else:
+            attention_mask = None
+
+        hidden_states = inputs_embeds
+        if self.gradient_checkpointing and self.training:
+            if use_cache:
+                logger.warning_once(
+                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ ) + use_cache = False + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + next_decoder_cache = () if use_cache else None + + for idx, decoder_layer in enumerate(self.layers): + if output_hidden_states: + all_hidden_states += (hidden_states,) + + past_key_value = ( + past_key_values[idx] if past_key_values is not None else None + ) + + if self.gradient_checkpointing and self.training: + + def create_custom_forward(module): + def custom_forward(*inputs): + # None for past_key_value + return module(*inputs, past_key_value, output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(decoder_layer), + hidden_states, + attention_mask, + position_ids, + ) + else: + layer_outputs = decoder_layer( + hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + hidden_states = self.norm(hidden_states) + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + next_cache = next_decoder_cache if use_cache else None + if not return_dict: + return tuple( + v + for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] + if v is not None + ) + return BaseModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + ) + + +class YiForCausalLM(YiPreTrainedModel): + _tied_weights_keys = ["lm_head.weight"] + + def __init__(self, config): + super().__init__(config) + self.model = YiModel(config) + + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def set_decoder(self, decoder): + self.model = decoder + + def get_decoder(self): + return self.model + + @add_start_docstrings_to_model_forward(Yi_INPUTS_DOCSTRING) + @replace_return_docstrings( + output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC + ) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, CausalLMOutputWithPast]: + r""" + Args: + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). 
Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, YiForCausalLM + + >>> model = YiForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS) + >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER) + + >>> prompt = "Hey, are you conscious? Can you talk to me?" + >>> inputs = tokenizer(prompt, return_tensors="pt") + + >>> # Generate + >>> generate_ids = model.generate(inputs.input_ids, max_length=30) + >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] + "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." + ```""" + + output_attentions = ( + output_attentions + if output_attentions is not None + else self.config.output_attentions + ) + output_hidden_states = ( + output_hidden_states + if output_hidden_states is not None + else self.config.output_hidden_states + ) + return_dict = ( + return_dict if return_dict is not None else self.config.use_return_dict + ) + + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = outputs[0] + logits = self.lm_head(hidden_states) + + loss = None + if labels is not None: + # Shift so that tokens < n predict n + shift_logits = logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss() + shift_logits = shift_logits.view(-1, self.config.vocab_size) + shift_labels = shift_labels.view(-1) + # Enable model parallelism + shift_labels = shift_labels.to(shift_logits.device) + loss = loss_fct(shift_logits, shift_labels) + + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else output + + return CausalLMOutputWithPast( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + def prepare_inputs_for_generation( + self, + input_ids, + past_key_values=None, + attention_mask=None, + inputs_embeds=None, + **kwargs, + ): + if past_key_values: + input_ids = input_ids[:, -1:] + + position_ids = kwargs.get("position_ids", None) + if attention_mask is not None and position_ids is None: + # create position_ids on the fly for batch generation + position_ids = attention_mask.long().cumsum(-1) - 1 + position_ids.masked_fill_(attention_mask == 0, 1) + if past_key_values: + position_ids = position_ids[:, -1].unsqueeze(-1) + + # if `inputs_embeds` are passed, we only want to use them in the 1st generation step + if inputs_embeds is not None and past_key_values is None: + model_inputs = {"inputs_embeds": inputs_embeds} + else: + model_inputs = {"input_ids": input_ids} + + model_inputs.update( + { + "position_ids": position_ids, + "past_key_values": past_key_values, + "use_cache": kwargs.get("use_cache"), + "attention_mask": attention_mask, + } + ) + return model_inputs + + @staticmethod + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + reordered_past += ( + tuple( + 
past_state.index_select(0, beam_idx.to(past_state.device))
+                    for past_state in layer_past
+                ),
+            )
+        return reordered_past
+
+
+@add_start_docstrings(
+    """
+    The Yi Model transformer with a sequence classification head on top (linear layer).
+
+    [`YiForSequenceClassification`] uses the last token in order to do the classification, as other causal models
+    (e.g. GPT-2) do.
+
+    Since it does classification on the last token, it needs to know the position of the last token. If a
+    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
+    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
+    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (takes the last value in
+    each row of the batch).
+    """,
+    Yi_START_DOCSTRING,
+)
+class YiForSequenceClassification(YiPreTrainedModel):
+    def __init__(self, config):
+        super().__init__(config)
+        self.num_labels = config.num_labels
+        self.model = YiModel(config)
+        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    def get_input_embeddings(self):
+        return self.model.embed_tokens
+
+    def set_input_embeddings(self, value):
+        self.model.embed_tokens = value
+
+    @add_start_docstrings_to_model_forward(Yi_INPUTS_DOCSTRING)
+    def forward(
+        self,
+        input_ids: torch.LongTensor = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_values: Optional[List[torch.FloatTensor]] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
+        r"""
+        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
+            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+        """
+        return_dict = (
+            return_dict if return_dict is not None else self.config.use_return_dict
+        )
+
+        transformer_outputs = self.model(
+            input_ids,
+            attention_mask=attention_mask,
+            position_ids=position_ids,
+            past_key_values=past_key_values,
+            inputs_embeds=inputs_embeds,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+        hidden_states = transformer_outputs[0]
+        logits = self.score(hidden_states)
+
+        if input_ids is not None:
+            batch_size = input_ids.shape[0]
+        else:
+            batch_size = inputs_embeds.shape[0]
+
+        if self.config.pad_token_id is None and batch_size != 1:
+            raise ValueError(
+                "Cannot handle batch sizes > 1 if no padding token is defined."
)
+        if self.config.pad_token_id is None:
+            sequence_lengths = -1
+        else:
+            if input_ids is not None:
+                sequence_lengths = (
+                    torch.eq(input_ids, self.config.pad_token_id).long().argmax(-1) - 1
+                ).to(logits.device)
+            else:
+                sequence_lengths = -1
+
+        pooled_logits = logits[
+            torch.arange(batch_size, device=logits.device), sequence_lengths
+        ]
+
+        loss = None
+        if labels is not None:
+            labels = labels.to(logits.device)
+            if self.config.problem_type is None:
+                if self.num_labels == 1:
+                    self.config.problem_type = "regression"
+                elif self.num_labels > 1 and (
+                    labels.dtype == torch.long or labels.dtype == torch.int
+                ):
+                    self.config.problem_type = "single_label_classification"
+                else:
+                    self.config.problem_type = "multi_label_classification"
+
+            if self.config.problem_type == "regression":
+                loss_fct = MSELoss()
+                if self.num_labels == 1:
+                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
+                else:
+                    loss = loss_fct(pooled_logits, labels)
+            elif self.config.problem_type == "single_label_classification":
+                loss_fct = CrossEntropyLoss()
+                loss = loss_fct(
+                    pooled_logits.view(-1, self.num_labels), labels.view(-1)
+                )
+            elif self.config.problem_type == "multi_label_classification":
+                loss_fct = BCEWithLogitsLoss()
+                loss = loss_fct(pooled_logits, labels)
+        if not return_dict:
+            output = (pooled_logits,) + transformer_outputs[1:]
+            return ((loss,) + output) if loss is not None else output
+
+        return SequenceClassifierOutputWithPast(
+            loss=loss,
+            logits=pooled_logits,
+            past_key_values=transformer_outputs.past_key_values,
+            hidden_states=transformer_outputs.hidden_states,
+            attentions=transformer_outputs.attentions,
+        )
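Before the weight shards, one detail from `modeling_yi.py` worth illustrating: `prepare_inputs_for_generation` derives `position_ids` from the attention mask with a cumulative sum, so left-padded batches still get positions counting from 0 at the first real token. A quick standalone sketch of that construction:

```python
import torch

attention_mask = torch.tensor([[0, 0, 1, 1, 1],   # left-padded row
                               [1, 1, 1, 1, 1]])
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)  # padding positions become 1
print(position_ids)
# tensor([[1, 1, 0, 1, 2],
#         [0, 1, 2, 3, 4]])
```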
diff --git a/pytorch_model-00001-of-00077.bin b/pytorch_model-00001-of-00077.bin
new file mode 100644
index 0000000000000000000000000000000000000000..c8ad15561427ba8ad9c51a0c76cd38f3e0862a37
--- /dev/null
+++ b/pytorch_model-00001-of-00077.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f008b55313ea3b48074112fbeffbdf9d948e62f1eec231d1ec702e8342fda1b
+size 917505413
diff --git a/pytorch_model-00002-of-00077.bin b/pytorch_model-00002-of-00077.bin
new file mode 100644
index 0000000000000000000000000000000000000000..d0763a2cf460ae19747c79046ea0508b3adf41df
--- /dev/null
+++ b/pytorch_model-00002-of-00077.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d1d5958a01c26b1820365bee324a4c5e2e9c2e3316aaf09665ffc0d4b48a1305
+size 822086554
diff --git a/pytorch_model-00003-of-00077.bin b/pytorch_model-00003-of-00077.bin
new file mode 100644
index 0000000000000000000000000000000000000000..4b59e674f2dc31bb215e634017b6e508dee2f276
--- /dev/null
+++ b/pytorch_model-00003-of-00077.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d85448e97ba75a31b2541f1ab0d66cbb886cbae65bdc8be458be03a16f0be0b8
+size 822115836
diff --git a/pytorch_model-00004-of-00077.bin b/pytorch_model-00004-of-00077.bin
new file mode 100644
index 0000000000000000000000000000000000000000..2a3c6ec4f43fe5b02aed0a92652bdfcedeb98dd1
--- /dev/null
+++ b/pytorch_model-00004-of-00077.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c7e498889f74e14dd7a92627567474a1cfc8753b38016f4e019656dadb743d39
+size 822115836
diff --git a/pytorch_model-00005-of-00077.bin b/pytorch_model-00005-of-00077.bin
new file mode 100644
index 0000000000000000000000000000000000000000..fcd5c00fbea50245a2e9cad20c7db3117b5bb708
--- /dev/null
+++ b/pytorch_model-00005-of-00077.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:427ed70ecfe0bd45b3d50c37466592b561ef784f5595edcd9a7f2383f9231ca6
+size 998276267
diff --git a/pytorch_model-00006-of-00077.bin b/pytorch_model-00006-of-00077.bin
new file mode 100644
index 0000000000000000000000000000000000000000..800d6b0a93052e72f8cc3ef1baaa8c20dd4745e4
--- /dev/null
+++ b/pytorch_model-00006-of-00077.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c3661d7ae3fabd493d971141797dca656282238e5ba1a4453ed93be193234c07
+size 998276267
diff --git a/pytorch_model-00007-of-00077.bin b/pytorch_model-00007-of-00077.bin
new file mode 100644
index 0000000000000000000000000000000000000000..45ff39f79aeb1c63c355df10784e69d63d7f56bd
--- /dev/null
+++ b/pytorch_model-00007-of-00077.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d6ea746408de553092b8692ed3500dbef3d42001ede84b2678baa74ab0cc4a78
+size 822086554
diff --git a/pytorch_model-00008-of-00077.bin b/pytorch_model-00008-of-00077.bin
new file mode 100644
index 0000000000000000000000000000000000000000..2e15ba9f8eafc377acd8fcaa2cd97664c118c7f2
--- /dev/null
+++ b/pytorch_model-00008-of-00077.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e2d7e1ce5a185916d05dae8e41c0fcc3009d0b8baca442494c313af0e012f740
+size 822115836
diff --git a/pytorch_model-00009-of-00077.bin b/pytorch_model-00009-of-00077.bin
new file mode 100644
index 0000000000000000000000000000000000000000..694d145b7d6672d3da79f67cc833e3c7108f47a5
--- /dev/null
+++ b/pytorch_model-00009-of-00077.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d8dde5951a9aa6cf0d0723df2cc66740564830f92fbeb308ac2f6a640aaf8b6a
+size 822115836
diff --git a/pytorch_model-00010-of-00077.bin b/pytorch_model-00010-of-00077.bin
new file mode 100644
index 0000000000000000000000000000000000000000..c38f7da1a35981ffae2672bf10ebe1c0c034d2ae
--- /dev/null
+++ b/pytorch_model-00010-of-00077.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bf011a401f02cc324d3d196e17781f41ea24ed7c24340d7572a3c6dfd7ee9923
+size 998276267
diff --git a/pytorch_model-00011-of-00077.bin b/pytorch_model-00011-of-00077.bin
new file mode 100644
index 0000000000000000000000000000000000000000..784b912a1ee7e9bc6e4cc3b880a9cabcb28b430a
--- /dev/null
+++ b/pytorch_model-00011-of-00077.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b64d4a714ae756d373b2965b4d1d907d7c52df995c4d25e23b8795a2f7c60a80
+size 998276267
diff --git a/pytorch_model-00012-of-00077.bin b/pytorch_model-00012-of-00077.bin
new file mode 100644
index 0000000000000000000000000000000000000000..f14bfa27ca70982fe3a7218a72852a2a8be34313
--- /dev/null
+++ b/pytorch_model-00012-of-00077.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ce8891f2e1119a35760455f8c7ef639810af52c521611fc7ff3a00fb13411df
+size 822086554
diff --git a/pytorch_model-00013-of-00077.bin b/pytorch_model-00013-of-00077.bin
new file mode 100644
index 0000000000000000000000000000000000000000..d7bf9fffb30cfebab6a18cba5b804e48c93bc4a7
--- /dev/null
+++ b/pytorch_model-00013-of-00077.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:632277bd09e7cb9f8011e98803e22e73790767f52591488b9ded51f7ebcc8ab2
+size 822115836
diff --git a/pytorch_model-00014-of-00077.bin b/pytorch_model-00014-of-00077.bin
new file mode 100644
index 0000000000000000000000000000000000000000..64d8d0e9d8a22eec0a895d9dc9bb2b9ae5eeee19
--- /dev/null
+++ b/pytorch_model-00014-of-00077.bin
@@ -0,0 +1,3 @@
+version
https://git-lfs.github.com/spec/v1 +oid sha256:5129cb47226623d984a47af84a827911571b76bc3a624a27baa6487cef1ea2a6 +size 822115836 diff --git a/pytorch_model-00015-of-00077.bin b/pytorch_model-00015-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..622f525e853d9bf9c6e8dce3acc9951134ce8f8a --- /dev/null +++ b/pytorch_model-00015-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c59fc995b4c6b191db065abd754dc69515c41fbb838045b6e195c226087e1b6 +size 998276267 diff --git a/pytorch_model-00016-of-00077.bin b/pytorch_model-00016-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..db371e7c8110041bab45a9782adf38d8de8f7099 --- /dev/null +++ b/pytorch_model-00016-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c6ecbf912cc7fccadd4eaf6cbfe7f42e9265b29660fa14ce748eecd74e0669f +size 998276267 diff --git a/pytorch_model-00017-of-00077.bin b/pytorch_model-00017-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..6694de1cac44d0de4bdb6d36ee701644c61c5e22 --- /dev/null +++ b/pytorch_model-00017-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:577ad5fa496a1034241ce4aef978e326737ff41e30351dacb0bd9143d26e6a44 +size 822086554 diff --git a/pytorch_model-00018-of-00077.bin b/pytorch_model-00018-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..d43d4f4f39e859fde43b85183703ccc883ad4e2b --- /dev/null +++ b/pytorch_model-00018-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:091383a2d940d65c731c293696d6751795506e300bcab0c4cb557082235d1699 +size 822115836 diff --git a/pytorch_model-00019-of-00077.bin b/pytorch_model-00019-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..0302a964cfed96e30571a18a57e70a7a147ef472 --- /dev/null +++ b/pytorch_model-00019-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6cee5bac09e019e40de352a15b07eabd4b20cd712dc67624a215c546a83d596 +size 822115836 diff --git a/pytorch_model-00020-of-00077.bin b/pytorch_model-00020-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..8a370f959f0c822dc2dd982bfdc1c4f14e8cb163 --- /dev/null +++ b/pytorch_model-00020-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7fc14c3764aa051c4ce8c9246d367cc096d65825b309bc3a4b4f151fb7b2551 +size 998276267 diff --git a/pytorch_model-00021-of-00077.bin b/pytorch_model-00021-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..009711433a90a15e3beb6600384241866f92d44c --- /dev/null +++ b/pytorch_model-00021-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d4aca8fb901bcd54868adcb56463d45f92d70ca93fc7adcbc1a81309599b6203 +size 998276267 diff --git a/pytorch_model-00022-of-00077.bin b/pytorch_model-00022-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..36fe7d1db19ee12093641ae6c066c61a52428646 --- /dev/null +++ b/pytorch_model-00022-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c09fb0ac57a401e069660daacce9c5f3e37ffaca9798ee17cf19a9736b1fd04e +size 822086554 diff --git a/pytorch_model-00023-of-00077.bin b/pytorch_model-00023-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..e5bcd93b7a7f48d10f021ad845fe3a3edfab870f --- /dev/null +++ b/pytorch_model-00023-of-00077.bin @@ 
-0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:65060f8bd8a92f503a12716ba53d821bc260e0e7586373e439598adf3a4eeece +size 822115836 diff --git a/pytorch_model-00024-of-00077.bin b/pytorch_model-00024-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..ef8e39891fde40c41bac2511731777ebf4a6e0db --- /dev/null +++ b/pytorch_model-00024-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3657a48d08843947796618bfdb85d64e93a3a50d79c1418c24d0bd56ba634497 +size 822115836 diff --git a/pytorch_model-00025-of-00077.bin b/pytorch_model-00025-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..0ae6ba068f1006ad5e1c7095642d53294d11bd26 --- /dev/null +++ b/pytorch_model-00025-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a69557c8acd7a21e2b8dff8c0af8b55a4200f1b13cdace4d6ca340ca1a18b64 +size 998276267 diff --git a/pytorch_model-00026-of-00077.bin b/pytorch_model-00026-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..f0f0878465a4c9d8ffb171a0975e9837c12268e7 --- /dev/null +++ b/pytorch_model-00026-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a752ac70b883821eca590a01b922f394453a7de636ea4142e71cdf0dac6d9123 +size 998276267 diff --git a/pytorch_model-00027-of-00077.bin b/pytorch_model-00027-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..0594f1abfa964f52344a3b582c188ac4b7204286 --- /dev/null +++ b/pytorch_model-00027-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f795f1f4229860229a3937540cf317a2eb49975362c9e7aea2393552fb27ba02 +size 822086554 diff --git a/pytorch_model-00028-of-00077.bin b/pytorch_model-00028-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..a90bb00e25d057a886d85ea3bce3d16400149ce6 --- /dev/null +++ b/pytorch_model-00028-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0bb4c67261fe159a71d87472482c517e3c36526897417385c9dcc2a616690120 +size 822115836 diff --git a/pytorch_model-00029-of-00077.bin b/pytorch_model-00029-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..23be46fbe2771293734af88a412259ccd88f0353 --- /dev/null +++ b/pytorch_model-00029-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ec3a2fde5adf32e862770e5e8edc3a917b3774f46dd5be3cffd04cf5b4b2f8e +size 822115836 diff --git a/pytorch_model-00030-of-00077.bin b/pytorch_model-00030-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..bfea8b45d4fd259aad76dd0c22558b8847f2094e --- /dev/null +++ b/pytorch_model-00030-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b46b3a57d749692914249523bbe60845cc0f0cae57283a2cd8bcce5b1aff4f9 +size 998276267 diff --git a/pytorch_model-00031-of-00077.bin b/pytorch_model-00031-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..2043979731ca908730e29cf64ba337848973d9ec --- /dev/null +++ b/pytorch_model-00031-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:79c30b89464c2e3a02c1f508327902e3f3606c72bde2858920b0608415ec3c6a +size 998276267 diff --git a/pytorch_model-00032-of-00077.bin b/pytorch_model-00032-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..973f6ff6fa36b1ccd6108a60e4274729eaa78f29 --- /dev/null +++ 
b/pytorch_model-00032-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:20fa61c860fe59da46cb0223dfb938369ffd35702dee7170b86af6a79f2945a8 +size 822086554 diff --git a/pytorch_model-00033-of-00077.bin b/pytorch_model-00033-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..b19fa0e089c4d6b88bfd313cc0fa63281fef7e0a --- /dev/null +++ b/pytorch_model-00033-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:174b587a621052f4debf764127e514ae9f9a604a468bf793a8f45be9832abfdf +size 822115836 diff --git a/pytorch_model-00034-of-00077.bin b/pytorch_model-00034-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..66405a8e4781e7775bcd564f80586ad7bdf31494 --- /dev/null +++ b/pytorch_model-00034-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3c12b266a1a6b6570a6279dd79f756d44850946e3270805a71e12f26cbbe3e95 +size 822115836 diff --git a/pytorch_model-00035-of-00077.bin b/pytorch_model-00035-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..8d1be8de9b53fa8467ff6cb6a01c392d6903fb10 --- /dev/null +++ b/pytorch_model-00035-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b32abd9c5a34eba20bc8faa401c45186535830b22668119c3cd3f36cffbd6764 +size 998276267 diff --git a/pytorch_model-00036-of-00077.bin b/pytorch_model-00036-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..36b914f2cd75fc7ab53871d9ce7c450c6d5963bd --- /dev/null +++ b/pytorch_model-00036-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b947853d3d1a96f075030708eedc0bb833759f3903d7d5ee1173fc9787765483 +size 998276267 diff --git a/pytorch_model-00037-of-00077.bin b/pytorch_model-00037-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..87a6c784ae16cbfcaeceb5ca7b85b71353f990e1 --- /dev/null +++ b/pytorch_model-00037-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:99292b9bd1c4c22d493953d81f6c30c780f54e1ba015cda5326da5071321bda5 +size 822086554 diff --git a/pytorch_model-00038-of-00077.bin b/pytorch_model-00038-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..af5a51eec24b02accf7ee86fad77a0d07330ccb6 --- /dev/null +++ b/pytorch_model-00038-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d017a39e08f8224a4318c24af3df96d5a711c4855eb6ed1a35e7d50ad48a9607 +size 822115836 diff --git a/pytorch_model-00039-of-00077.bin b/pytorch_model-00039-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..ab5704c968469b8a5cd9d129b49964c8d10f54b1 --- /dev/null +++ b/pytorch_model-00039-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:89a3f91da56a76427563ea5af3d8145e3270d9a8c5f5fc9ee7c00c616c2c17c6 +size 822115836 diff --git a/pytorch_model-00040-of-00077.bin b/pytorch_model-00040-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..b65170fd8de17507022e4c35d62217ad8381b543 --- /dev/null +++ b/pytorch_model-00040-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a4745381c69850027bbfcfac1bdb6b77e33b9497dd8a6ad13df9020162156d41 +size 998276267 diff --git a/pytorch_model-00041-of-00077.bin b/pytorch_model-00041-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..b80f5fa7ffb346cca92e94e4beaee3e9c74a5482 
--- /dev/null +++ b/pytorch_model-00041-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8611ec684ceeed728433ae9b051ab2293a00f4f5489f8794d440dbe7770fff99 +size 998276267 diff --git a/pytorch_model-00042-of-00077.bin b/pytorch_model-00042-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..a46b75d206d932ff1642d88ea1f1a5b701deea53 --- /dev/null +++ b/pytorch_model-00042-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:928ebebdae9638a94561cf34f2a8d500719e6f80be8f5cb6f7edef64f1245c47 +size 822086554 diff --git a/pytorch_model-00043-of-00077.bin b/pytorch_model-00043-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..8fa0c6707583be80a82e8eeecaab1c6f365a8843 --- /dev/null +++ b/pytorch_model-00043-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d1f1338d53c4d430221c1cb38dce255596250033b67bdded95391dfbd9337dc2 +size 822115836 diff --git a/pytorch_model-00044-of-00077.bin b/pytorch_model-00044-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..c5c03192e604fec8b5c66ef0eca156b7214d1751 --- /dev/null +++ b/pytorch_model-00044-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:87e392be7a0b8b80aeb49f2fcc70c090749f0c9074c2cf78acf764032fcc5017 +size 822115836 diff --git a/pytorch_model-00045-of-00077.bin b/pytorch_model-00045-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..2d462b7bc2440a247c932aa16e4763b5b85a9feb --- /dev/null +++ b/pytorch_model-00045-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:49353c7423c270f1b8fa29f4ce720609737eeedf24fd78d9b8e2930db4da5233 +size 998276267 diff --git a/pytorch_model-00046-of-00077.bin b/pytorch_model-00046-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..dafbea0e6dcf8f0c193a8002c9563326b3080a75 --- /dev/null +++ b/pytorch_model-00046-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77c94f5cd4229bda2cfef666c2d241f664835cd7e59f815b4907095e1ff1e9e8 +size 998276267 diff --git a/pytorch_model-00047-of-00077.bin b/pytorch_model-00047-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..94802a4998ee25dc2ed41a540576d8fd145a946d --- /dev/null +++ b/pytorch_model-00047-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:39c247eca8010b0e54c40b46e94328c99c8b7b3ff79c9bc2701df0d75c151e19 +size 822086554 diff --git a/pytorch_model-00048-of-00077.bin b/pytorch_model-00048-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..c3dcd4dc0a0e3896a2dd37412c27e67b43ff42f7 --- /dev/null +++ b/pytorch_model-00048-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2435e9ae88b6a77451fae9d1fab807c0d129cfa56cfd96c00986ab444dc5fb9d +size 822115836 diff --git a/pytorch_model-00049-of-00077.bin b/pytorch_model-00049-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..7b4c022d3cf2704eeeace44c60f4d94ff7051f50 --- /dev/null +++ b/pytorch_model-00049-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e66814fc448e780febd3752272afdfed95b01e3dd3ecda6912873003b977bcf8 +size 822115836 diff --git a/pytorch_model-00050-of-00077.bin b/pytorch_model-00050-of-00077.bin new file mode 100644 index 
0000000000000000000000000000000000000000..b9202ca8526be63b016a7710d064d95b88f2b417 --- /dev/null +++ b/pytorch_model-00050-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:04e946d3eced576bb5358e9bbea166b40aa37c407314b52f879a86db9c29ba96 +size 998276267 diff --git a/pytorch_model-00051-of-00077.bin b/pytorch_model-00051-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..464ae9ee2474e04f51bd82e6b5cc159c634a6b3d --- /dev/null +++ b/pytorch_model-00051-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cbc4fee258661822d789472dd52983e8b21059cc454db152f4547102971629a2 +size 998276267 diff --git a/pytorch_model-00052-of-00077.bin b/pytorch_model-00052-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..fe7d7d341f0d336bc0eccd543727c72761bcdd1f --- /dev/null +++ b/pytorch_model-00052-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c9d9999001b22d0edd137573cdccb2133a1d18cd4e112e92afe62dd1826844a +size 822086554 diff --git a/pytorch_model-00053-of-00077.bin b/pytorch_model-00053-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..9f0b373b8a34da1bdb78d4262fb3a3335f0acf6d --- /dev/null +++ b/pytorch_model-00053-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d3b166204ebcc35fb8979bd5d44ed4ae2f2d0a836b880ef3ab11e9f7f5c658a +size 822115836 diff --git a/pytorch_model-00054-of-00077.bin b/pytorch_model-00054-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..d70cbda5eee2917f979f4b6bffc8b452f62f3043 --- /dev/null +++ b/pytorch_model-00054-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:831b2bacac1ab465b10cd984fcde79734ba5e030f0f672bc1f07a32cb52b8d67 +size 822115836 diff --git a/pytorch_model-00055-of-00077.bin b/pytorch_model-00055-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..311e3afa11a2ab1bfde363b0d8282d33288f978b --- /dev/null +++ b/pytorch_model-00055-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c3e4a69f0bf207b8c4ad597453ec3062375a010e5c58c02aaa1c6644ace29a06 +size 998276267 diff --git a/pytorch_model-00056-of-00077.bin b/pytorch_model-00056-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..a791e1bbefdb62d841b3c66224c07d2092bd4719 --- /dev/null +++ b/pytorch_model-00056-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:34e0821e67957fe880dcbe0124447a6dde7f07e9b8eb53bb1d21c8fe7793dd2f +size 998276267 diff --git a/pytorch_model-00057-of-00077.bin b/pytorch_model-00057-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..72a2e2de8749f06ed700fb741a05de01d0802371 --- /dev/null +++ b/pytorch_model-00057-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c88018022725db64146c8da51191d911a7c1e3fc14fbee149aa67b306f0353f2 +size 822086554 diff --git a/pytorch_model-00058-of-00077.bin b/pytorch_model-00058-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..efec277446012db8f302087d02777db19d1af7c5 --- /dev/null +++ b/pytorch_model-00058-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5698fc835aa0e5257f019a5c27658a413e3f45baca949bcd6649ddeb21bfb674 +size 822115836 diff --git a/pytorch_model-00059-of-00077.bin b/pytorch_model-00059-of-00077.bin new file 
mode 100644 index 0000000000000000000000000000000000000000..192244a6fa9020673c8549f4bd9fcaa205c4a717 --- /dev/null +++ b/pytorch_model-00059-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e916e7eda63318a6229c6901b30278d014530fdf9a04373ba47cd8d9232a76cb +size 822115836 diff --git a/pytorch_model-00060-of-00077.bin b/pytorch_model-00060-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..8bfc6960addcfc4cdd620f5defaee1512b20f515 --- /dev/null +++ b/pytorch_model-00060-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:476ad32788b448fde2d0f261f8ed451e0e62ab1d81f3639a00eb1adda3b6c307 +size 998276267 diff --git a/pytorch_model-00061-of-00077.bin b/pytorch_model-00061-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..2ddd3d916db6faed2743b8d9efb9f1641309cb87 --- /dev/null +++ b/pytorch_model-00061-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:03e6931d7d0d862d8a638f0ed01abd28534dc27f9cf3021dd467ee820c0619be +size 998276267 diff --git a/pytorch_model-00062-of-00077.bin b/pytorch_model-00062-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..607845552909af26ea7a32ff14267ebc7cc56574 --- /dev/null +++ b/pytorch_model-00062-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e6ecac95a2b440766dc2593fdaf83efbf797a5a7aa5f9d19e5f5020e629f302 +size 822086554 diff --git a/pytorch_model-00063-of-00077.bin b/pytorch_model-00063-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..02e750a235140379475d5c87b6167cd33a9ac81b --- /dev/null +++ b/pytorch_model-00063-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a523712e0893e41d1c680da88f23b7c7d7e5d4dbb14d2c4727cb2a46b3936978 +size 822115836 diff --git a/pytorch_model-00064-of-00077.bin b/pytorch_model-00064-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..8c8f338151550e36b562be98d2b69b602048d2db --- /dev/null +++ b/pytorch_model-00064-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:655c7a171c8d0764f196316ac13a9042d733c5745af46886b69302d45b60daef +size 822115836 diff --git a/pytorch_model-00065-of-00077.bin b/pytorch_model-00065-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..19185ee5f4820e78caefc1e59d1fbe4ea4d4f63e --- /dev/null +++ b/pytorch_model-00065-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19a0edc0968ddd22b0b2a741fd6d52b2f9c124246951e6e5b7629af99cd87865 +size 998276267 diff --git a/pytorch_model-00066-of-00077.bin b/pytorch_model-00066-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..f847e52f75a74c98f7b10637db92b4fdc23594c0 --- /dev/null +++ b/pytorch_model-00066-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c4e5e3d9d326db08dbf696ef0bca3202963ec74f9d5d541db625f2f79a739d8 +size 998276267 diff --git a/pytorch_model-00067-of-00077.bin b/pytorch_model-00067-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..86faf18f8f9d55da7c5f625b52859d5a9cc57518 --- /dev/null +++ b/pytorch_model-00067-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:049ea3f037d6bf8b7e8342e7926d585d500bafefa868e5e0982ef5843324ac69 +size 822086554 diff --git a/pytorch_model-00068-of-00077.bin 
b/pytorch_model-00068-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..770f8cd239704606911f826b75f7718b742489b9 --- /dev/null +++ b/pytorch_model-00068-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63cc0e165f913e8b5a738ea9e0000f95c8e6091f06a29e873e3a5680947df044 +size 822115836 diff --git a/pytorch_model-00069-of-00077.bin b/pytorch_model-00069-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..158d9463a86cb1a82a5d0deb5aed891f7485f33b --- /dev/null +++ b/pytorch_model-00069-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ea38f1c8ab21a3cecf9e4b0c7e97b5e2594c4c68358bb12d53da915962715f77 +size 822115836 diff --git a/pytorch_model-00070-of-00077.bin b/pytorch_model-00070-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..f2de9b8aeb1fe78dea7a102433370ea5be5dbbb9 --- /dev/null +++ b/pytorch_model-00070-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4a1bb4b0e7cf3780eb7886ab023682088258ad7ae02fffc55ef4b2209fa1c243 +size 998276267 diff --git a/pytorch_model-00071-of-00077.bin b/pytorch_model-00071-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..28f7c5c7e0e2c2dd4901cc45542b3a2ee00ce5ab --- /dev/null +++ b/pytorch_model-00071-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f2ea23dca999a473c476f99f546b3c204db86e9aaad57f89ae85ce913f2f3c35 +size 998276267 diff --git a/pytorch_model-00072-of-00077.bin b/pytorch_model-00072-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..1a8c4abc7e3ef92b1f13efb7b215885c4ae52ea3 --- /dev/null +++ b/pytorch_model-00072-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b132e91cda418a1c176f39844aaf8bb09eff9bd22824702aab98aa0aab5a63e +size 822086554 diff --git a/pytorch_model-00073-of-00077.bin b/pytorch_model-00073-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..3b9610fa06f82c5e3928ab1fba160227a4bad386 --- /dev/null +++ b/pytorch_model-00073-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:55c26179711993c05d4724fc8904a4719e7c8cdbeedf87642e5fbb761869346e +size 822115836 diff --git a/pytorch_model-00074-of-00077.bin b/pytorch_model-00074-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..685b8f5dd996c76984f9d4a21333f8b307e2a3b0 --- /dev/null +++ b/pytorch_model-00074-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ed77f64396c4a7b638573c2104c04a958e49f91d666cca802c844b969c0b2457 +size 822115836 diff --git a/pytorch_model-00075-of-00077.bin b/pytorch_model-00075-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..b1e2935d77ae30ec98b57aefe3c2cb6805978de8 --- /dev/null +++ b/pytorch_model-00075-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:283fba35c4a7950cce74c46d236ac9d76932b228217249565b17d7e4cc50afff +size 998276267 diff --git a/pytorch_model-00076-of-00077.bin b/pytorch_model-00076-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..a256c6bd345b143743369dee665131300e30aefd --- /dev/null +++ b/pytorch_model-00076-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a54685f9b85a682d6202d550aff653a2cdd8d33967d160e00a0ab5ee3bdc7709 +size 998290876 diff --git 
a/pytorch_model-00077-of-00077.bin b/pytorch_model-00077-of-00077.bin new file mode 100644 index 0000000000000000000000000000000000000000..50a3e6832e01420ce27d9e146f1ec38351f7327e --- /dev/null +++ b/pytorch_model-00077-of-00077.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b60abb3209025540e6df6ead07a42e4b44e7ae94e7c692b03f6e8583520cd7d +size 917505413 diff --git a/pytorch_model.bin.index.json b/pytorch_model.bin.index.json new file mode 100644 index 0000000000000000000000000000000000000000..4ea81a6d4652988bfe85d85e838c9422e70a5838 --- /dev/null +++ b/pytorch_model.bin.index.json @@ -0,0 +1,550 @@ +{ + "metadata": { + "total_size": 68777834496 + }, + "weight_map": { + "lm_head.weight": "pytorch_model-00077-of-00077.bin", + "model.embed_tokens.weight": "pytorch_model-00001-of-00077.bin", + "model.layers.0.ln1.weight": "pytorch_model-00003-of-00077.bin", + "model.layers.0.ln2.weight": "pytorch_model-00003-of-00077.bin", + "model.layers.0.mlp.down_proj.weight": "pytorch_model-00002-of-00077.bin", + "model.layers.0.mlp.gate_proj.weight": "pytorch_model-00002-of-00077.bin", + "model.layers.0.mlp.up_proj.weight": "pytorch_model-00003-of-00077.bin", + "model.layers.0.self_attn.k_proj.weight": "pytorch_model-00002-of-00077.bin", + "model.layers.0.self_attn.o_proj.weight": "pytorch_model-00002-of-00077.bin", + "model.layers.0.self_attn.q_proj.weight": "pytorch_model-00002-of-00077.bin", + "model.layers.0.self_attn.v_proj.weight": "pytorch_model-00002-of-00077.bin", + "model.layers.1.ln1.weight": "pytorch_model-00004-of-00077.bin", + "model.layers.1.ln2.weight": "pytorch_model-00004-of-00077.bin", + "model.layers.1.mlp.down_proj.weight": "pytorch_model-00004-of-00077.bin", + "model.layers.1.mlp.gate_proj.weight": "pytorch_model-00003-of-00077.bin", + "model.layers.1.mlp.up_proj.weight": "pytorch_model-00004-of-00077.bin", + "model.layers.1.self_attn.k_proj.weight": "pytorch_model-00003-of-00077.bin", + "model.layers.1.self_attn.o_proj.weight": "pytorch_model-00003-of-00077.bin", + "model.layers.1.self_attn.q_proj.weight": "pytorch_model-00003-of-00077.bin", + "model.layers.1.self_attn.v_proj.weight": "pytorch_model-00003-of-00077.bin", + "model.layers.10.ln1.weight": "pytorch_model-00015-of-00077.bin", + "model.layers.10.ln2.weight": "pytorch_model-00015-of-00077.bin", + "model.layers.10.mlp.down_proj.weight": "pytorch_model-00015-of-00077.bin", + "model.layers.10.mlp.gate_proj.weight": "pytorch_model-00015-of-00077.bin", + "model.layers.10.mlp.up_proj.weight": "pytorch_model-00015-of-00077.bin", + "model.layers.10.self_attn.k_proj.weight": "pytorch_model-00014-of-00077.bin", + "model.layers.10.self_attn.o_proj.weight": "pytorch_model-00014-of-00077.bin", + "model.layers.10.self_attn.q_proj.weight": "pytorch_model-00014-of-00077.bin", + "model.layers.10.self_attn.v_proj.weight": "pytorch_model-00014-of-00077.bin", + "model.layers.11.ln1.weight": "pytorch_model-00016-of-00077.bin", + "model.layers.11.ln2.weight": "pytorch_model-00016-of-00077.bin", + "model.layers.11.mlp.down_proj.weight": "pytorch_model-00016-of-00077.bin", + "model.layers.11.mlp.gate_proj.weight": "pytorch_model-00016-of-00077.bin", + "model.layers.11.mlp.up_proj.weight": "pytorch_model-00016-of-00077.bin", + "model.layers.11.self_attn.k_proj.weight": "pytorch_model-00015-of-00077.bin", + "model.layers.11.self_attn.o_proj.weight": "pytorch_model-00016-of-00077.bin", + "model.layers.11.self_attn.q_proj.weight": "pytorch_model-00015-of-00077.bin", + "model.layers.11.self_attn.v_proj.weight": 
"pytorch_model-00016-of-00077.bin", + "model.layers.12.ln1.weight": "pytorch_model-00018-of-00077.bin", + "model.layers.12.ln2.weight": "pytorch_model-00018-of-00077.bin", + "model.layers.12.mlp.down_proj.weight": "pytorch_model-00017-of-00077.bin", + "model.layers.12.mlp.gate_proj.weight": "pytorch_model-00017-of-00077.bin", + "model.layers.12.mlp.up_proj.weight": "pytorch_model-00018-of-00077.bin", + "model.layers.12.self_attn.k_proj.weight": "pytorch_model-00017-of-00077.bin", + "model.layers.12.self_attn.o_proj.weight": "pytorch_model-00017-of-00077.bin", + "model.layers.12.self_attn.q_proj.weight": "pytorch_model-00017-of-00077.bin", + "model.layers.12.self_attn.v_proj.weight": "pytorch_model-00017-of-00077.bin", + "model.layers.13.ln1.weight": "pytorch_model-00019-of-00077.bin", + "model.layers.13.ln2.weight": "pytorch_model-00019-of-00077.bin", + "model.layers.13.mlp.down_proj.weight": "pytorch_model-00019-of-00077.bin", + "model.layers.13.mlp.gate_proj.weight": "pytorch_model-00018-of-00077.bin", + "model.layers.13.mlp.up_proj.weight": "pytorch_model-00019-of-00077.bin", + "model.layers.13.self_attn.k_proj.weight": "pytorch_model-00018-of-00077.bin", + "model.layers.13.self_attn.o_proj.weight": "pytorch_model-00018-of-00077.bin", + "model.layers.13.self_attn.q_proj.weight": "pytorch_model-00018-of-00077.bin", + "model.layers.13.self_attn.v_proj.weight": "pytorch_model-00018-of-00077.bin", + "model.layers.14.ln1.weight": "pytorch_model-00020-of-00077.bin", + "model.layers.14.ln2.weight": "pytorch_model-00020-of-00077.bin", + "model.layers.14.mlp.down_proj.weight": "pytorch_model-00020-of-00077.bin", + "model.layers.14.mlp.gate_proj.weight": "pytorch_model-00020-of-00077.bin", + "model.layers.14.mlp.up_proj.weight": "pytorch_model-00020-of-00077.bin", + "model.layers.14.self_attn.k_proj.weight": "pytorch_model-00019-of-00077.bin", + "model.layers.14.self_attn.o_proj.weight": "pytorch_model-00019-of-00077.bin", + "model.layers.14.self_attn.q_proj.weight": "pytorch_model-00019-of-00077.bin", + "model.layers.14.self_attn.v_proj.weight": "pytorch_model-00019-of-00077.bin", + "model.layers.15.ln1.weight": "pytorch_model-00021-of-00077.bin", + "model.layers.15.ln2.weight": "pytorch_model-00021-of-00077.bin", + "model.layers.15.mlp.down_proj.weight": "pytorch_model-00021-of-00077.bin", + "model.layers.15.mlp.gate_proj.weight": "pytorch_model-00021-of-00077.bin", + "model.layers.15.mlp.up_proj.weight": "pytorch_model-00021-of-00077.bin", + "model.layers.15.self_attn.k_proj.weight": "pytorch_model-00020-of-00077.bin", + "model.layers.15.self_attn.o_proj.weight": "pytorch_model-00021-of-00077.bin", + "model.layers.15.self_attn.q_proj.weight": "pytorch_model-00020-of-00077.bin", + "model.layers.15.self_attn.v_proj.weight": "pytorch_model-00021-of-00077.bin", + "model.layers.16.ln1.weight": "pytorch_model-00023-of-00077.bin", + "model.layers.16.ln2.weight": "pytorch_model-00023-of-00077.bin", + "model.layers.16.mlp.down_proj.weight": "pytorch_model-00022-of-00077.bin", + "model.layers.16.mlp.gate_proj.weight": "pytorch_model-00022-of-00077.bin", + "model.layers.16.mlp.up_proj.weight": "pytorch_model-00023-of-00077.bin", + "model.layers.16.self_attn.k_proj.weight": "pytorch_model-00022-of-00077.bin", + "model.layers.16.self_attn.o_proj.weight": "pytorch_model-00022-of-00077.bin", + "model.layers.16.self_attn.q_proj.weight": "pytorch_model-00022-of-00077.bin", + "model.layers.16.self_attn.v_proj.weight": "pytorch_model-00022-of-00077.bin", + "model.layers.17.ln1.weight": 
"pytorch_model-00024-of-00077.bin", + "model.layers.17.ln2.weight": "pytorch_model-00024-of-00077.bin", + "model.layers.17.mlp.down_proj.weight": "pytorch_model-00024-of-00077.bin", + "model.layers.17.mlp.gate_proj.weight": "pytorch_model-00023-of-00077.bin", + "model.layers.17.mlp.up_proj.weight": "pytorch_model-00024-of-00077.bin", + "model.layers.17.self_attn.k_proj.weight": "pytorch_model-00023-of-00077.bin", + "model.layers.17.self_attn.o_proj.weight": "pytorch_model-00023-of-00077.bin", + "model.layers.17.self_attn.q_proj.weight": "pytorch_model-00023-of-00077.bin", + "model.layers.17.self_attn.v_proj.weight": "pytorch_model-00023-of-00077.bin", + "model.layers.18.ln1.weight": "pytorch_model-00025-of-00077.bin", + "model.layers.18.ln2.weight": "pytorch_model-00025-of-00077.bin", + "model.layers.18.mlp.down_proj.weight": "pytorch_model-00025-of-00077.bin", + "model.layers.18.mlp.gate_proj.weight": "pytorch_model-00025-of-00077.bin", + "model.layers.18.mlp.up_proj.weight": "pytorch_model-00025-of-00077.bin", + "model.layers.18.self_attn.k_proj.weight": "pytorch_model-00024-of-00077.bin", + "model.layers.18.self_attn.o_proj.weight": "pytorch_model-00024-of-00077.bin", + "model.layers.18.self_attn.q_proj.weight": "pytorch_model-00024-of-00077.bin", + "model.layers.18.self_attn.v_proj.weight": "pytorch_model-00024-of-00077.bin", + "model.layers.19.ln1.weight": "pytorch_model-00026-of-00077.bin", + "model.layers.19.ln2.weight": "pytorch_model-00026-of-00077.bin", + "model.layers.19.mlp.down_proj.weight": "pytorch_model-00026-of-00077.bin", + "model.layers.19.mlp.gate_proj.weight": "pytorch_model-00026-of-00077.bin", + "model.layers.19.mlp.up_proj.weight": "pytorch_model-00026-of-00077.bin", + "model.layers.19.self_attn.k_proj.weight": "pytorch_model-00025-of-00077.bin", + "model.layers.19.self_attn.o_proj.weight": "pytorch_model-00026-of-00077.bin", + "model.layers.19.self_attn.q_proj.weight": "pytorch_model-00025-of-00077.bin", + "model.layers.19.self_attn.v_proj.weight": "pytorch_model-00026-of-00077.bin", + "model.layers.2.ln1.weight": "pytorch_model-00005-of-00077.bin", + "model.layers.2.ln2.weight": "pytorch_model-00005-of-00077.bin", + "model.layers.2.mlp.down_proj.weight": "pytorch_model-00005-of-00077.bin", + "model.layers.2.mlp.gate_proj.weight": "pytorch_model-00005-of-00077.bin", + "model.layers.2.mlp.up_proj.weight": "pytorch_model-00005-of-00077.bin", + "model.layers.2.self_attn.k_proj.weight": "pytorch_model-00004-of-00077.bin", + "model.layers.2.self_attn.o_proj.weight": "pytorch_model-00004-of-00077.bin", + "model.layers.2.self_attn.q_proj.weight": "pytorch_model-00004-of-00077.bin", + "model.layers.2.self_attn.v_proj.weight": "pytorch_model-00004-of-00077.bin", + "model.layers.20.ln1.weight": "pytorch_model-00028-of-00077.bin", + "model.layers.20.ln2.weight": "pytorch_model-00028-of-00077.bin", + "model.layers.20.mlp.down_proj.weight": "pytorch_model-00027-of-00077.bin", + "model.layers.20.mlp.gate_proj.weight": "pytorch_model-00027-of-00077.bin", + "model.layers.20.mlp.up_proj.weight": "pytorch_model-00028-of-00077.bin", + "model.layers.20.self_attn.k_proj.weight": "pytorch_model-00027-of-00077.bin", + "model.layers.20.self_attn.o_proj.weight": "pytorch_model-00027-of-00077.bin", + "model.layers.20.self_attn.q_proj.weight": "pytorch_model-00027-of-00077.bin", + "model.layers.20.self_attn.v_proj.weight": "pytorch_model-00027-of-00077.bin", + "model.layers.21.ln1.weight": "pytorch_model-00029-of-00077.bin", + "model.layers.21.ln2.weight": "pytorch_model-00029-of-00077.bin", 
+ "model.layers.21.mlp.down_proj.weight": "pytorch_model-00029-of-00077.bin", + "model.layers.21.mlp.gate_proj.weight": "pytorch_model-00028-of-00077.bin", + "model.layers.21.mlp.up_proj.weight": "pytorch_model-00029-of-00077.bin", + "model.layers.21.self_attn.k_proj.weight": "pytorch_model-00028-of-00077.bin", + "model.layers.21.self_attn.o_proj.weight": "pytorch_model-00028-of-00077.bin", + "model.layers.21.self_attn.q_proj.weight": "pytorch_model-00028-of-00077.bin", + "model.layers.21.self_attn.v_proj.weight": "pytorch_model-00028-of-00077.bin", + "model.layers.22.ln1.weight": "pytorch_model-00030-of-00077.bin", + "model.layers.22.ln2.weight": "pytorch_model-00030-of-00077.bin", + "model.layers.22.mlp.down_proj.weight": "pytorch_model-00030-of-00077.bin", + "model.layers.22.mlp.gate_proj.weight": "pytorch_model-00030-of-00077.bin", + "model.layers.22.mlp.up_proj.weight": "pytorch_model-00030-of-00077.bin", + "model.layers.22.self_attn.k_proj.weight": "pytorch_model-00029-of-00077.bin", + "model.layers.22.self_attn.o_proj.weight": "pytorch_model-00029-of-00077.bin", + "model.layers.22.self_attn.q_proj.weight": "pytorch_model-00029-of-00077.bin", + "model.layers.22.self_attn.v_proj.weight": "pytorch_model-00029-of-00077.bin", + "model.layers.23.ln1.weight": "pytorch_model-00031-of-00077.bin", + "model.layers.23.ln2.weight": "pytorch_model-00031-of-00077.bin", + "model.layers.23.mlp.down_proj.weight": "pytorch_model-00031-of-00077.bin", + "model.layers.23.mlp.gate_proj.weight": "pytorch_model-00031-of-00077.bin", + "model.layers.23.mlp.up_proj.weight": "pytorch_model-00031-of-00077.bin", + "model.layers.23.self_attn.k_proj.weight": "pytorch_model-00030-of-00077.bin", + "model.layers.23.self_attn.o_proj.weight": "pytorch_model-00031-of-00077.bin", + "model.layers.23.self_attn.q_proj.weight": "pytorch_model-00030-of-00077.bin", + "model.layers.23.self_attn.v_proj.weight": "pytorch_model-00031-of-00077.bin", + "model.layers.24.ln1.weight": "pytorch_model-00033-of-00077.bin", + "model.layers.24.ln2.weight": "pytorch_model-00033-of-00077.bin", + "model.layers.24.mlp.down_proj.weight": "pytorch_model-00032-of-00077.bin", + "model.layers.24.mlp.gate_proj.weight": "pytorch_model-00032-of-00077.bin", + "model.layers.24.mlp.up_proj.weight": "pytorch_model-00033-of-00077.bin", + "model.layers.24.self_attn.k_proj.weight": "pytorch_model-00032-of-00077.bin", + "model.layers.24.self_attn.o_proj.weight": "pytorch_model-00032-of-00077.bin", + "model.layers.24.self_attn.q_proj.weight": "pytorch_model-00032-of-00077.bin", + "model.layers.24.self_attn.v_proj.weight": "pytorch_model-00032-of-00077.bin", + "model.layers.25.ln1.weight": "pytorch_model-00034-of-00077.bin", + "model.layers.25.ln2.weight": "pytorch_model-00034-of-00077.bin", + "model.layers.25.mlp.down_proj.weight": "pytorch_model-00034-of-00077.bin", + "model.layers.25.mlp.gate_proj.weight": "pytorch_model-00033-of-00077.bin", + "model.layers.25.mlp.up_proj.weight": "pytorch_model-00034-of-00077.bin", + "model.layers.25.self_attn.k_proj.weight": "pytorch_model-00033-of-00077.bin", + "model.layers.25.self_attn.o_proj.weight": "pytorch_model-00033-of-00077.bin", + "model.layers.25.self_attn.q_proj.weight": "pytorch_model-00033-of-00077.bin", + "model.layers.25.self_attn.v_proj.weight": "pytorch_model-00033-of-00077.bin", + "model.layers.26.ln1.weight": "pytorch_model-00035-of-00077.bin", + "model.layers.26.ln2.weight": "pytorch_model-00035-of-00077.bin", + "model.layers.26.mlp.down_proj.weight": "pytorch_model-00035-of-00077.bin", + 
"model.layers.26.mlp.gate_proj.weight": "pytorch_model-00035-of-00077.bin", + "model.layers.26.mlp.up_proj.weight": "pytorch_model-00035-of-00077.bin", + "model.layers.26.self_attn.k_proj.weight": "pytorch_model-00034-of-00077.bin", + "model.layers.26.self_attn.o_proj.weight": "pytorch_model-00034-of-00077.bin", + "model.layers.26.self_attn.q_proj.weight": "pytorch_model-00034-of-00077.bin", + "model.layers.26.self_attn.v_proj.weight": "pytorch_model-00034-of-00077.bin", + "model.layers.27.ln1.weight": "pytorch_model-00036-of-00077.bin", + "model.layers.27.ln2.weight": "pytorch_model-00036-of-00077.bin", + "model.layers.27.mlp.down_proj.weight": "pytorch_model-00036-of-00077.bin", + "model.layers.27.mlp.gate_proj.weight": "pytorch_model-00036-of-00077.bin", + "model.layers.27.mlp.up_proj.weight": "pytorch_model-00036-of-00077.bin", + "model.layers.27.self_attn.k_proj.weight": "pytorch_model-00035-of-00077.bin", + "model.layers.27.self_attn.o_proj.weight": "pytorch_model-00036-of-00077.bin", + "model.layers.27.self_attn.q_proj.weight": "pytorch_model-00035-of-00077.bin", + "model.layers.27.self_attn.v_proj.weight": "pytorch_model-00036-of-00077.bin", + "model.layers.28.ln1.weight": "pytorch_model-00038-of-00077.bin", + "model.layers.28.ln2.weight": "pytorch_model-00038-of-00077.bin", + "model.layers.28.mlp.down_proj.weight": "pytorch_model-00037-of-00077.bin", + "model.layers.28.mlp.gate_proj.weight": "pytorch_model-00037-of-00077.bin", + "model.layers.28.mlp.up_proj.weight": "pytorch_model-00038-of-00077.bin", + "model.layers.28.self_attn.k_proj.weight": "pytorch_model-00037-of-00077.bin", + "model.layers.28.self_attn.o_proj.weight": "pytorch_model-00037-of-00077.bin", + "model.layers.28.self_attn.q_proj.weight": "pytorch_model-00037-of-00077.bin", + "model.layers.28.self_attn.v_proj.weight": "pytorch_model-00037-of-00077.bin", + "model.layers.29.ln1.weight": "pytorch_model-00039-of-00077.bin", + "model.layers.29.ln2.weight": "pytorch_model-00039-of-00077.bin", + "model.layers.29.mlp.down_proj.weight": "pytorch_model-00039-of-00077.bin", + "model.layers.29.mlp.gate_proj.weight": "pytorch_model-00038-of-00077.bin", + "model.layers.29.mlp.up_proj.weight": "pytorch_model-00039-of-00077.bin", + "model.layers.29.self_attn.k_proj.weight": "pytorch_model-00038-of-00077.bin", + "model.layers.29.self_attn.o_proj.weight": "pytorch_model-00038-of-00077.bin", + "model.layers.29.self_attn.q_proj.weight": "pytorch_model-00038-of-00077.bin", + "model.layers.29.self_attn.v_proj.weight": "pytorch_model-00038-of-00077.bin", + "model.layers.3.ln1.weight": "pytorch_model-00006-of-00077.bin", + "model.layers.3.ln2.weight": "pytorch_model-00006-of-00077.bin", + "model.layers.3.mlp.down_proj.weight": "pytorch_model-00006-of-00077.bin", + "model.layers.3.mlp.gate_proj.weight": "pytorch_model-00006-of-00077.bin", + "model.layers.3.mlp.up_proj.weight": "pytorch_model-00006-of-00077.bin", + "model.layers.3.self_attn.k_proj.weight": "pytorch_model-00005-of-00077.bin", + "model.layers.3.self_attn.o_proj.weight": "pytorch_model-00006-of-00077.bin", + "model.layers.3.self_attn.q_proj.weight": "pytorch_model-00005-of-00077.bin", + "model.layers.3.self_attn.v_proj.weight": "pytorch_model-00006-of-00077.bin", + "model.layers.30.ln1.weight": "pytorch_model-00040-of-00077.bin", + "model.layers.30.ln2.weight": "pytorch_model-00040-of-00077.bin", + "model.layers.30.mlp.down_proj.weight": "pytorch_model-00040-of-00077.bin", + "model.layers.30.mlp.gate_proj.weight": "pytorch_model-00040-of-00077.bin", + 
"model.layers.30.mlp.up_proj.weight": "pytorch_model-00040-of-00077.bin", + "model.layers.30.self_attn.k_proj.weight": "pytorch_model-00039-of-00077.bin", + "model.layers.30.self_attn.o_proj.weight": "pytorch_model-00039-of-00077.bin", + "model.layers.30.self_attn.q_proj.weight": "pytorch_model-00039-of-00077.bin", + "model.layers.30.self_attn.v_proj.weight": "pytorch_model-00039-of-00077.bin", + "model.layers.31.ln1.weight": "pytorch_model-00041-of-00077.bin", + "model.layers.31.ln2.weight": "pytorch_model-00041-of-00077.bin", + "model.layers.31.mlp.down_proj.weight": "pytorch_model-00041-of-00077.bin", + "model.layers.31.mlp.gate_proj.weight": "pytorch_model-00041-of-00077.bin", + "model.layers.31.mlp.up_proj.weight": "pytorch_model-00041-of-00077.bin", + "model.layers.31.self_attn.k_proj.weight": "pytorch_model-00040-of-00077.bin", + "model.layers.31.self_attn.o_proj.weight": "pytorch_model-00041-of-00077.bin", + "model.layers.31.self_attn.q_proj.weight": "pytorch_model-00040-of-00077.bin", + "model.layers.31.self_attn.v_proj.weight": "pytorch_model-00041-of-00077.bin", + "model.layers.32.ln1.weight": "pytorch_model-00043-of-00077.bin", + "model.layers.32.ln2.weight": "pytorch_model-00043-of-00077.bin", + "model.layers.32.mlp.down_proj.weight": "pytorch_model-00042-of-00077.bin", + "model.layers.32.mlp.gate_proj.weight": "pytorch_model-00042-of-00077.bin", + "model.layers.32.mlp.up_proj.weight": "pytorch_model-00043-of-00077.bin", + "model.layers.32.self_attn.k_proj.weight": "pytorch_model-00042-of-00077.bin", + "model.layers.32.self_attn.o_proj.weight": "pytorch_model-00042-of-00077.bin", + "model.layers.32.self_attn.q_proj.weight": "pytorch_model-00042-of-00077.bin", + "model.layers.32.self_attn.v_proj.weight": "pytorch_model-00042-of-00077.bin", + "model.layers.33.ln1.weight": "pytorch_model-00044-of-00077.bin", + "model.layers.33.ln2.weight": "pytorch_model-00044-of-00077.bin", + "model.layers.33.mlp.down_proj.weight": "pytorch_model-00044-of-00077.bin", + "model.layers.33.mlp.gate_proj.weight": "pytorch_model-00043-of-00077.bin", + "model.layers.33.mlp.up_proj.weight": "pytorch_model-00044-of-00077.bin", + "model.layers.33.self_attn.k_proj.weight": "pytorch_model-00043-of-00077.bin", + "model.layers.33.self_attn.o_proj.weight": "pytorch_model-00043-of-00077.bin", + "model.layers.33.self_attn.q_proj.weight": "pytorch_model-00043-of-00077.bin", + "model.layers.33.self_attn.v_proj.weight": "pytorch_model-00043-of-00077.bin", + "model.layers.34.ln1.weight": "pytorch_model-00045-of-00077.bin", + "model.layers.34.ln2.weight": "pytorch_model-00045-of-00077.bin", + "model.layers.34.mlp.down_proj.weight": "pytorch_model-00045-of-00077.bin", + "model.layers.34.mlp.gate_proj.weight": "pytorch_model-00045-of-00077.bin", + "model.layers.34.mlp.up_proj.weight": "pytorch_model-00045-of-00077.bin", + "model.layers.34.self_attn.k_proj.weight": "pytorch_model-00044-of-00077.bin", + "model.layers.34.self_attn.o_proj.weight": "pytorch_model-00044-of-00077.bin", + "model.layers.34.self_attn.q_proj.weight": "pytorch_model-00044-of-00077.bin", + "model.layers.34.self_attn.v_proj.weight": "pytorch_model-00044-of-00077.bin", + "model.layers.35.ln1.weight": "pytorch_model-00046-of-00077.bin", + "model.layers.35.ln2.weight": "pytorch_model-00046-of-00077.bin", + "model.layers.35.mlp.down_proj.weight": "pytorch_model-00046-of-00077.bin", + "model.layers.35.mlp.gate_proj.weight": "pytorch_model-00046-of-00077.bin", + "model.layers.35.mlp.up_proj.weight": "pytorch_model-00046-of-00077.bin", + 
"model.layers.35.self_attn.k_proj.weight": "pytorch_model-00045-of-00077.bin", + "model.layers.35.self_attn.o_proj.weight": "pytorch_model-00046-of-00077.bin", + "model.layers.35.self_attn.q_proj.weight": "pytorch_model-00045-of-00077.bin", + "model.layers.35.self_attn.v_proj.weight": "pytorch_model-00046-of-00077.bin", + "model.layers.36.ln1.weight": "pytorch_model-00048-of-00077.bin", + "model.layers.36.ln2.weight": "pytorch_model-00048-of-00077.bin", + "model.layers.36.mlp.down_proj.weight": "pytorch_model-00047-of-00077.bin", + "model.layers.36.mlp.gate_proj.weight": "pytorch_model-00047-of-00077.bin", + "model.layers.36.mlp.up_proj.weight": "pytorch_model-00048-of-00077.bin", + "model.layers.36.self_attn.k_proj.weight": "pytorch_model-00047-of-00077.bin", + "model.layers.36.self_attn.o_proj.weight": "pytorch_model-00047-of-00077.bin", + "model.layers.36.self_attn.q_proj.weight": "pytorch_model-00047-of-00077.bin", + "model.layers.36.self_attn.v_proj.weight": "pytorch_model-00047-of-00077.bin", + "model.layers.37.ln1.weight": "pytorch_model-00049-of-00077.bin", + "model.layers.37.ln2.weight": "pytorch_model-00049-of-00077.bin", + "model.layers.37.mlp.down_proj.weight": "pytorch_model-00049-of-00077.bin", + "model.layers.37.mlp.gate_proj.weight": "pytorch_model-00048-of-00077.bin", + "model.layers.37.mlp.up_proj.weight": "pytorch_model-00049-of-00077.bin", + "model.layers.37.self_attn.k_proj.weight": "pytorch_model-00048-of-00077.bin", + "model.layers.37.self_attn.o_proj.weight": "pytorch_model-00048-of-00077.bin", + "model.layers.37.self_attn.q_proj.weight": "pytorch_model-00048-of-00077.bin", + "model.layers.37.self_attn.v_proj.weight": "pytorch_model-00048-of-00077.bin", + "model.layers.38.ln1.weight": "pytorch_model-00050-of-00077.bin", + "model.layers.38.ln2.weight": "pytorch_model-00050-of-00077.bin", + "model.layers.38.mlp.down_proj.weight": "pytorch_model-00050-of-00077.bin", + "model.layers.38.mlp.gate_proj.weight": "pytorch_model-00050-of-00077.bin", + "model.layers.38.mlp.up_proj.weight": "pytorch_model-00050-of-00077.bin", + "model.layers.38.self_attn.k_proj.weight": "pytorch_model-00049-of-00077.bin", + "model.layers.38.self_attn.o_proj.weight": "pytorch_model-00049-of-00077.bin", + "model.layers.38.self_attn.q_proj.weight": "pytorch_model-00049-of-00077.bin", + "model.layers.38.self_attn.v_proj.weight": "pytorch_model-00049-of-00077.bin", + "model.layers.39.ln1.weight": "pytorch_model-00051-of-00077.bin", + "model.layers.39.ln2.weight": "pytorch_model-00051-of-00077.bin", + "model.layers.39.mlp.down_proj.weight": "pytorch_model-00051-of-00077.bin", + "model.layers.39.mlp.gate_proj.weight": "pytorch_model-00051-of-00077.bin", + "model.layers.39.mlp.up_proj.weight": "pytorch_model-00051-of-00077.bin", + "model.layers.39.self_attn.k_proj.weight": "pytorch_model-00050-of-00077.bin", + "model.layers.39.self_attn.o_proj.weight": "pytorch_model-00051-of-00077.bin", + "model.layers.39.self_attn.q_proj.weight": "pytorch_model-00050-of-00077.bin", + "model.layers.39.self_attn.v_proj.weight": "pytorch_model-00051-of-00077.bin", + "model.layers.4.ln1.weight": "pytorch_model-00008-of-00077.bin", + "model.layers.4.ln2.weight": "pytorch_model-00008-of-00077.bin", + "model.layers.4.mlp.down_proj.weight": "pytorch_model-00007-of-00077.bin", + "model.layers.4.mlp.gate_proj.weight": "pytorch_model-00007-of-00077.bin", + "model.layers.4.mlp.up_proj.weight": "pytorch_model-00008-of-00077.bin", + "model.layers.4.self_attn.k_proj.weight": "pytorch_model-00007-of-00077.bin", + 
"model.layers.4.self_attn.o_proj.weight": "pytorch_model-00007-of-00077.bin", + "model.layers.4.self_attn.q_proj.weight": "pytorch_model-00007-of-00077.bin", + "model.layers.4.self_attn.v_proj.weight": "pytorch_model-00007-of-00077.bin", + "model.layers.40.ln1.weight": "pytorch_model-00053-of-00077.bin", + "model.layers.40.ln2.weight": "pytorch_model-00053-of-00077.bin", + "model.layers.40.mlp.down_proj.weight": "pytorch_model-00052-of-00077.bin", + "model.layers.40.mlp.gate_proj.weight": "pytorch_model-00052-of-00077.bin", + "model.layers.40.mlp.up_proj.weight": "pytorch_model-00053-of-00077.bin", + "model.layers.40.self_attn.k_proj.weight": "pytorch_model-00052-of-00077.bin", + "model.layers.40.self_attn.o_proj.weight": "pytorch_model-00052-of-00077.bin", + "model.layers.40.self_attn.q_proj.weight": "pytorch_model-00052-of-00077.bin", + "model.layers.40.self_attn.v_proj.weight": "pytorch_model-00052-of-00077.bin", + "model.layers.41.ln1.weight": "pytorch_model-00054-of-00077.bin", + "model.layers.41.ln2.weight": "pytorch_model-00054-of-00077.bin", + "model.layers.41.mlp.down_proj.weight": "pytorch_model-00054-of-00077.bin", + "model.layers.41.mlp.gate_proj.weight": "pytorch_model-00053-of-00077.bin", + "model.layers.41.mlp.up_proj.weight": "pytorch_model-00054-of-00077.bin", + "model.layers.41.self_attn.k_proj.weight": "pytorch_model-00053-of-00077.bin", + "model.layers.41.self_attn.o_proj.weight": "pytorch_model-00053-of-00077.bin", + "model.layers.41.self_attn.q_proj.weight": "pytorch_model-00053-of-00077.bin", + "model.layers.41.self_attn.v_proj.weight": "pytorch_model-00053-of-00077.bin", + "model.layers.42.ln1.weight": "pytorch_model-00055-of-00077.bin", + "model.layers.42.ln2.weight": "pytorch_model-00055-of-00077.bin", + "model.layers.42.mlp.down_proj.weight": "pytorch_model-00055-of-00077.bin", + "model.layers.42.mlp.gate_proj.weight": "pytorch_model-00055-of-00077.bin", + "model.layers.42.mlp.up_proj.weight": "pytorch_model-00055-of-00077.bin", + "model.layers.42.self_attn.k_proj.weight": "pytorch_model-00054-of-00077.bin", + "model.layers.42.self_attn.o_proj.weight": "pytorch_model-00054-of-00077.bin", + "model.layers.42.self_attn.q_proj.weight": "pytorch_model-00054-of-00077.bin", + "model.layers.42.self_attn.v_proj.weight": "pytorch_model-00054-of-00077.bin", + "model.layers.43.ln1.weight": "pytorch_model-00056-of-00077.bin", + "model.layers.43.ln2.weight": "pytorch_model-00056-of-00077.bin", + "model.layers.43.mlp.down_proj.weight": "pytorch_model-00056-of-00077.bin", + "model.layers.43.mlp.gate_proj.weight": "pytorch_model-00056-of-00077.bin", + "model.layers.43.mlp.up_proj.weight": "pytorch_model-00056-of-00077.bin", + "model.layers.43.self_attn.k_proj.weight": "pytorch_model-00055-of-00077.bin", + "model.layers.43.self_attn.o_proj.weight": "pytorch_model-00056-of-00077.bin", + "model.layers.43.self_attn.q_proj.weight": "pytorch_model-00055-of-00077.bin", + "model.layers.43.self_attn.v_proj.weight": "pytorch_model-00056-of-00077.bin", + "model.layers.44.ln1.weight": "pytorch_model-00058-of-00077.bin", + "model.layers.44.ln2.weight": "pytorch_model-00058-of-00077.bin", + "model.layers.44.mlp.down_proj.weight": "pytorch_model-00057-of-00077.bin", + "model.layers.44.mlp.gate_proj.weight": "pytorch_model-00057-of-00077.bin", + "model.layers.44.mlp.up_proj.weight": "pytorch_model-00058-of-00077.bin", + "model.layers.44.self_attn.k_proj.weight": "pytorch_model-00057-of-00077.bin", + "model.layers.44.self_attn.o_proj.weight": "pytorch_model-00057-of-00077.bin", + 
"model.layers.44.self_attn.q_proj.weight": "pytorch_model-00057-of-00077.bin", + "model.layers.44.self_attn.v_proj.weight": "pytorch_model-00057-of-00077.bin", + "model.layers.45.ln1.weight": "pytorch_model-00059-of-00077.bin", + "model.layers.45.ln2.weight": "pytorch_model-00059-of-00077.bin", + "model.layers.45.mlp.down_proj.weight": "pytorch_model-00059-of-00077.bin", + "model.layers.45.mlp.gate_proj.weight": "pytorch_model-00058-of-00077.bin", + "model.layers.45.mlp.up_proj.weight": "pytorch_model-00059-of-00077.bin", + "model.layers.45.self_attn.k_proj.weight": "pytorch_model-00058-of-00077.bin", + "model.layers.45.self_attn.o_proj.weight": "pytorch_model-00058-of-00077.bin", + "model.layers.45.self_attn.q_proj.weight": "pytorch_model-00058-of-00077.bin", + "model.layers.45.self_attn.v_proj.weight": "pytorch_model-00058-of-00077.bin", + "model.layers.46.ln1.weight": "pytorch_model-00060-of-00077.bin", + "model.layers.46.ln2.weight": "pytorch_model-00060-of-00077.bin", + "model.layers.46.mlp.down_proj.weight": "pytorch_model-00060-of-00077.bin", + "model.layers.46.mlp.gate_proj.weight": "pytorch_model-00060-of-00077.bin", + "model.layers.46.mlp.up_proj.weight": "pytorch_model-00060-of-00077.bin", + "model.layers.46.self_attn.k_proj.weight": "pytorch_model-00059-of-00077.bin", + "model.layers.46.self_attn.o_proj.weight": "pytorch_model-00059-of-00077.bin", + "model.layers.46.self_attn.q_proj.weight": "pytorch_model-00059-of-00077.bin", + "model.layers.46.self_attn.v_proj.weight": "pytorch_model-00059-of-00077.bin", + "model.layers.47.ln1.weight": "pytorch_model-00061-of-00077.bin", + "model.layers.47.ln2.weight": "pytorch_model-00061-of-00077.bin", + "model.layers.47.mlp.down_proj.weight": "pytorch_model-00061-of-00077.bin", + "model.layers.47.mlp.gate_proj.weight": "pytorch_model-00061-of-00077.bin", + "model.layers.47.mlp.up_proj.weight": "pytorch_model-00061-of-00077.bin", + "model.layers.47.self_attn.k_proj.weight": "pytorch_model-00060-of-00077.bin", + "model.layers.47.self_attn.o_proj.weight": "pytorch_model-00061-of-00077.bin", + "model.layers.47.self_attn.q_proj.weight": "pytorch_model-00060-of-00077.bin", + "model.layers.47.self_attn.v_proj.weight": "pytorch_model-00061-of-00077.bin", + "model.layers.48.ln1.weight": "pytorch_model-00063-of-00077.bin", + "model.layers.48.ln2.weight": "pytorch_model-00063-of-00077.bin", + "model.layers.48.mlp.down_proj.weight": "pytorch_model-00062-of-00077.bin", + "model.layers.48.mlp.gate_proj.weight": "pytorch_model-00062-of-00077.bin", + "model.layers.48.mlp.up_proj.weight": "pytorch_model-00063-of-00077.bin", + "model.layers.48.self_attn.k_proj.weight": "pytorch_model-00062-of-00077.bin", + "model.layers.48.self_attn.o_proj.weight": "pytorch_model-00062-of-00077.bin", + "model.layers.48.self_attn.q_proj.weight": "pytorch_model-00062-of-00077.bin", + "model.layers.48.self_attn.v_proj.weight": "pytorch_model-00062-of-00077.bin", + "model.layers.49.ln1.weight": "pytorch_model-00064-of-00077.bin", + "model.layers.49.ln2.weight": "pytorch_model-00064-of-00077.bin", + "model.layers.49.mlp.down_proj.weight": "pytorch_model-00064-of-00077.bin", + "model.layers.49.mlp.gate_proj.weight": "pytorch_model-00063-of-00077.bin", + "model.layers.49.mlp.up_proj.weight": "pytorch_model-00064-of-00077.bin", + "model.layers.49.self_attn.k_proj.weight": "pytorch_model-00063-of-00077.bin", + "model.layers.49.self_attn.o_proj.weight": "pytorch_model-00063-of-00077.bin", + "model.layers.49.self_attn.q_proj.weight": "pytorch_model-00063-of-00077.bin", + 
"model.layers.49.self_attn.v_proj.weight": "pytorch_model-00063-of-00077.bin", + "model.layers.5.ln1.weight": "pytorch_model-00009-of-00077.bin", + "model.layers.5.ln2.weight": "pytorch_model-00009-of-00077.bin", + "model.layers.5.mlp.down_proj.weight": "pytorch_model-00009-of-00077.bin", + "model.layers.5.mlp.gate_proj.weight": "pytorch_model-00008-of-00077.bin", + "model.layers.5.mlp.up_proj.weight": "pytorch_model-00009-of-00077.bin", + "model.layers.5.self_attn.k_proj.weight": "pytorch_model-00008-of-00077.bin", + "model.layers.5.self_attn.o_proj.weight": "pytorch_model-00008-of-00077.bin", + "model.layers.5.self_attn.q_proj.weight": "pytorch_model-00008-of-00077.bin", + "model.layers.5.self_attn.v_proj.weight": "pytorch_model-00008-of-00077.bin", + "model.layers.50.ln1.weight": "pytorch_model-00065-of-00077.bin", + "model.layers.50.ln2.weight": "pytorch_model-00065-of-00077.bin", + "model.layers.50.mlp.down_proj.weight": "pytorch_model-00065-of-00077.bin", + "model.layers.50.mlp.gate_proj.weight": "pytorch_model-00065-of-00077.bin", + "model.layers.50.mlp.up_proj.weight": "pytorch_model-00065-of-00077.bin", + "model.layers.50.self_attn.k_proj.weight": "pytorch_model-00064-of-00077.bin", + "model.layers.50.self_attn.o_proj.weight": "pytorch_model-00064-of-00077.bin", + "model.layers.50.self_attn.q_proj.weight": "pytorch_model-00064-of-00077.bin", + "model.layers.50.self_attn.v_proj.weight": "pytorch_model-00064-of-00077.bin", + "model.layers.51.ln1.weight": "pytorch_model-00066-of-00077.bin", + "model.layers.51.ln2.weight": "pytorch_model-00066-of-00077.bin", + "model.layers.51.mlp.down_proj.weight": "pytorch_model-00066-of-00077.bin", + "model.layers.51.mlp.gate_proj.weight": "pytorch_model-00066-of-00077.bin", + "model.layers.51.mlp.up_proj.weight": "pytorch_model-00066-of-00077.bin", + "model.layers.51.self_attn.k_proj.weight": "pytorch_model-00065-of-00077.bin", + "model.layers.51.self_attn.o_proj.weight": "pytorch_model-00066-of-00077.bin", + "model.layers.51.self_attn.q_proj.weight": "pytorch_model-00065-of-00077.bin", + "model.layers.51.self_attn.v_proj.weight": "pytorch_model-00066-of-00077.bin", + "model.layers.52.ln1.weight": "pytorch_model-00068-of-00077.bin", + "model.layers.52.ln2.weight": "pytorch_model-00068-of-00077.bin", + "model.layers.52.mlp.down_proj.weight": "pytorch_model-00067-of-00077.bin", + "model.layers.52.mlp.gate_proj.weight": "pytorch_model-00067-of-00077.bin", + "model.layers.52.mlp.up_proj.weight": "pytorch_model-00068-of-00077.bin", + "model.layers.52.self_attn.k_proj.weight": "pytorch_model-00067-of-00077.bin", + "model.layers.52.self_attn.o_proj.weight": "pytorch_model-00067-of-00077.bin", + "model.layers.52.self_attn.q_proj.weight": "pytorch_model-00067-of-00077.bin", + "model.layers.52.self_attn.v_proj.weight": "pytorch_model-00067-of-00077.bin", + "model.layers.53.ln1.weight": "pytorch_model-00069-of-00077.bin", + "model.layers.53.ln2.weight": "pytorch_model-00069-of-00077.bin", + "model.layers.53.mlp.down_proj.weight": "pytorch_model-00069-of-00077.bin", + "model.layers.53.mlp.gate_proj.weight": "pytorch_model-00068-of-00077.bin", + "model.layers.53.mlp.up_proj.weight": "pytorch_model-00069-of-00077.bin", + "model.layers.53.self_attn.k_proj.weight": "pytorch_model-00068-of-00077.bin", + "model.layers.53.self_attn.o_proj.weight": "pytorch_model-00068-of-00077.bin", + "model.layers.53.self_attn.q_proj.weight": "pytorch_model-00068-of-00077.bin", + "model.layers.53.self_attn.v_proj.weight": "pytorch_model-00068-of-00077.bin", + 
"model.layers.54.ln1.weight": "pytorch_model-00070-of-00077.bin", + "model.layers.54.ln2.weight": "pytorch_model-00070-of-00077.bin", + "model.layers.54.mlp.down_proj.weight": "pytorch_model-00070-of-00077.bin", + "model.layers.54.mlp.gate_proj.weight": "pytorch_model-00070-of-00077.bin", + "model.layers.54.mlp.up_proj.weight": "pytorch_model-00070-of-00077.bin", + "model.layers.54.self_attn.k_proj.weight": "pytorch_model-00069-of-00077.bin", + "model.layers.54.self_attn.o_proj.weight": "pytorch_model-00069-of-00077.bin", + "model.layers.54.self_attn.q_proj.weight": "pytorch_model-00069-of-00077.bin", + "model.layers.54.self_attn.v_proj.weight": "pytorch_model-00069-of-00077.bin", + "model.layers.55.ln1.weight": "pytorch_model-00071-of-00077.bin", + "model.layers.55.ln2.weight": "pytorch_model-00071-of-00077.bin", + "model.layers.55.mlp.down_proj.weight": "pytorch_model-00071-of-00077.bin", + "model.layers.55.mlp.gate_proj.weight": "pytorch_model-00071-of-00077.bin", + "model.layers.55.mlp.up_proj.weight": "pytorch_model-00071-of-00077.bin", + "model.layers.55.self_attn.k_proj.weight": "pytorch_model-00070-of-00077.bin", + "model.layers.55.self_attn.o_proj.weight": "pytorch_model-00071-of-00077.bin", + "model.layers.55.self_attn.q_proj.weight": "pytorch_model-00070-of-00077.bin", + "model.layers.55.self_attn.v_proj.weight": "pytorch_model-00071-of-00077.bin", + "model.layers.56.ln1.weight": "pytorch_model-00073-of-00077.bin", + "model.layers.56.ln2.weight": "pytorch_model-00073-of-00077.bin", + "model.layers.56.mlp.down_proj.weight": "pytorch_model-00072-of-00077.bin", + "model.layers.56.mlp.gate_proj.weight": "pytorch_model-00072-of-00077.bin", + "model.layers.56.mlp.up_proj.weight": "pytorch_model-00073-of-00077.bin", + "model.layers.56.self_attn.k_proj.weight": "pytorch_model-00072-of-00077.bin", + "model.layers.56.self_attn.o_proj.weight": "pytorch_model-00072-of-00077.bin", + "model.layers.56.self_attn.q_proj.weight": "pytorch_model-00072-of-00077.bin", + "model.layers.56.self_attn.v_proj.weight": "pytorch_model-00072-of-00077.bin", + "model.layers.57.ln1.weight": "pytorch_model-00074-of-00077.bin", + "model.layers.57.ln2.weight": "pytorch_model-00074-of-00077.bin", + "model.layers.57.mlp.down_proj.weight": "pytorch_model-00074-of-00077.bin", + "model.layers.57.mlp.gate_proj.weight": "pytorch_model-00073-of-00077.bin", + "model.layers.57.mlp.up_proj.weight": "pytorch_model-00074-of-00077.bin", + "model.layers.57.self_attn.k_proj.weight": "pytorch_model-00073-of-00077.bin", + "model.layers.57.self_attn.o_proj.weight": "pytorch_model-00073-of-00077.bin", + "model.layers.57.self_attn.q_proj.weight": "pytorch_model-00073-of-00077.bin", + "model.layers.57.self_attn.v_proj.weight": "pytorch_model-00073-of-00077.bin", + "model.layers.58.ln1.weight": "pytorch_model-00075-of-00077.bin", + "model.layers.58.ln2.weight": "pytorch_model-00075-of-00077.bin", + "model.layers.58.mlp.down_proj.weight": "pytorch_model-00075-of-00077.bin", + "model.layers.58.mlp.gate_proj.weight": "pytorch_model-00075-of-00077.bin", + "model.layers.58.mlp.up_proj.weight": "pytorch_model-00075-of-00077.bin", + "model.layers.58.self_attn.k_proj.weight": "pytorch_model-00074-of-00077.bin", + "model.layers.58.self_attn.o_proj.weight": "pytorch_model-00074-of-00077.bin", + "model.layers.58.self_attn.q_proj.weight": "pytorch_model-00074-of-00077.bin", + "model.layers.58.self_attn.v_proj.weight": "pytorch_model-00074-of-00077.bin", + "model.layers.59.ln1.weight": "pytorch_model-00076-of-00077.bin", + 
"model.layers.59.ln2.weight": "pytorch_model-00076-of-00077.bin", + "model.layers.59.mlp.down_proj.weight": "pytorch_model-00076-of-00077.bin", + "model.layers.59.mlp.gate_proj.weight": "pytorch_model-00076-of-00077.bin", + "model.layers.59.mlp.up_proj.weight": "pytorch_model-00076-of-00077.bin", + "model.layers.59.self_attn.k_proj.weight": "pytorch_model-00075-of-00077.bin", + "model.layers.59.self_attn.o_proj.weight": "pytorch_model-00076-of-00077.bin", + "model.layers.59.self_attn.q_proj.weight": "pytorch_model-00075-of-00077.bin", + "model.layers.59.self_attn.v_proj.weight": "pytorch_model-00076-of-00077.bin", + "model.layers.6.ln1.weight": "pytorch_model-00010-of-00077.bin", + "model.layers.6.ln2.weight": "pytorch_model-00010-of-00077.bin", + "model.layers.6.mlp.down_proj.weight": "pytorch_model-00010-of-00077.bin", + "model.layers.6.mlp.gate_proj.weight": "pytorch_model-00010-of-00077.bin", + "model.layers.6.mlp.up_proj.weight": "pytorch_model-00010-of-00077.bin", + "model.layers.6.self_attn.k_proj.weight": "pytorch_model-00009-of-00077.bin", + "model.layers.6.self_attn.o_proj.weight": "pytorch_model-00009-of-00077.bin", + "model.layers.6.self_attn.q_proj.weight": "pytorch_model-00009-of-00077.bin", + "model.layers.6.self_attn.v_proj.weight": "pytorch_model-00009-of-00077.bin", + "model.layers.7.ln1.weight": "pytorch_model-00011-of-00077.bin", + "model.layers.7.ln2.weight": "pytorch_model-00011-of-00077.bin", + "model.layers.7.mlp.down_proj.weight": "pytorch_model-00011-of-00077.bin", + "model.layers.7.mlp.gate_proj.weight": "pytorch_model-00011-of-00077.bin", + "model.layers.7.mlp.up_proj.weight": "pytorch_model-00011-of-00077.bin", + "model.layers.7.self_attn.k_proj.weight": "pytorch_model-00010-of-00077.bin", + "model.layers.7.self_attn.o_proj.weight": "pytorch_model-00011-of-00077.bin", + "model.layers.7.self_attn.q_proj.weight": "pytorch_model-00010-of-00077.bin", + "model.layers.7.self_attn.v_proj.weight": "pytorch_model-00011-of-00077.bin", + "model.layers.8.ln1.weight": "pytorch_model-00013-of-00077.bin", + "model.layers.8.ln2.weight": "pytorch_model-00013-of-00077.bin", + "model.layers.8.mlp.down_proj.weight": "pytorch_model-00012-of-00077.bin", + "model.layers.8.mlp.gate_proj.weight": "pytorch_model-00012-of-00077.bin", + "model.layers.8.mlp.up_proj.weight": "pytorch_model-00013-of-00077.bin", + "model.layers.8.self_attn.k_proj.weight": "pytorch_model-00012-of-00077.bin", + "model.layers.8.self_attn.o_proj.weight": "pytorch_model-00012-of-00077.bin", + "model.layers.8.self_attn.q_proj.weight": "pytorch_model-00012-of-00077.bin", + "model.layers.8.self_attn.v_proj.weight": "pytorch_model-00012-of-00077.bin", + "model.layers.9.ln1.weight": "pytorch_model-00014-of-00077.bin", + "model.layers.9.ln2.weight": "pytorch_model-00014-of-00077.bin", + "model.layers.9.mlp.down_proj.weight": "pytorch_model-00014-of-00077.bin", + "model.layers.9.mlp.gate_proj.weight": "pytorch_model-00013-of-00077.bin", + "model.layers.9.mlp.up_proj.weight": "pytorch_model-00014-of-00077.bin", + "model.layers.9.self_attn.k_proj.weight": "pytorch_model-00013-of-00077.bin", + "model.layers.9.self_attn.o_proj.weight": "pytorch_model-00013-of-00077.bin", + "model.layers.9.self_attn.q_proj.weight": "pytorch_model-00013-of-00077.bin", + "model.layers.9.self_attn.v_proj.weight": "pytorch_model-00013-of-00077.bin", + "model.norm.weight": "pytorch_model-00076-of-00077.bin" + } +} diff --git a/special_tokens_map.json b/special_tokens_map.json new file mode 100644 index 
0000000000000000000000000000000000000000..335ae1cfaf4a0aba8bce5e98aa79e53f61f35848 --- /dev/null +++ b/special_tokens_map.json @@ -0,0 +1,30 @@ +{ + "bos_token": { + "content": "<|startoftext|>", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false + }, + "eos_token": { + "content": "<|endoftext|>", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false + }, + "pad_token": { + "content": "", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false + }, + "unk_token": { + "content": "", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false + } +} diff --git a/tokenization_yi.py b/tokenization_yi.py new file mode 100644 index 0000000000000000000000000000000000000000..bb82053316afd31188cb12447d03cdf783a909a5 --- /dev/null +++ b/tokenization_yi.py @@ -0,0 +1,255 @@ +import os +from shutil import copyfile +from typing import Any, Dict, List, Optional, Tuple + +import sentencepiece as spm +from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer +from transformers.utils import logging + +logger = logging.get_logger(__name__) + +VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"} + +PRETRAINED_VOCAB_FILES_MAP = { + "vocab_file": {}, + "tokenizer_file": {}, +} +PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {} + + +class YiTokenizer(PreTrainedTokenizer): + """ + Construct a Yi tokenizer. Based on byte-level Byte-Pair-Encoding. + + Args: + vocab_file (`str`): + Path to the vocabulary file. + """ + + vocab_files_names = VOCAB_FILES_NAMES + pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP + max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES + model_input_names = ["input_ids", "attention_mask"] + + def __init__( + self, + vocab_file, + unk_token="", + bos_token="<|startoftext|>", + eos_token="<|endoftext|>", + pad_token="", + sp_model_kwargs: Optional[Dict[str, Any]] = None, + add_bos_token=True, + add_eos_token=False, + clean_up_tokenization_spaces=False, + **kwargs, + ): + self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs + bos_token = ( + AddedToken(bos_token, lstrip=False, rstrip=False) + if isinstance(bos_token, str) + else bos_token + ) + eos_token = ( + AddedToken(eos_token, lstrip=False, rstrip=False) + if isinstance(eos_token, str) + else eos_token + ) + unk_token = ( + AddedToken(unk_token, lstrip=False, rstrip=False) + if isinstance(unk_token, str) + else unk_token + ) + pad_token = ( + AddedToken(pad_token, lstrip=False, rstrip=False) + if isinstance(pad_token, str) + else pad_token + ) + self.vocab_file = vocab_file + self.add_bos_token = add_bos_token + self.add_eos_token = add_eos_token + self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) + self.sp_model.Load(vocab_file) + super().__init__( + bos_token=bos_token, + eos_token=eos_token, + unk_token=unk_token, + pad_token=pad_token, + add_bos_token=add_bos_token, + add_eos_token=add_eos_token, + sp_model_kwargs=self.sp_model_kwargs, + clean_up_tokenization_spaces=clean_up_tokenization_spaces, + **kwargs, + ) + + def __getstate__(self): + state = self.__dict__.copy() + state["sp_model"] = None + return state + + def __setstate__(self, d): + self.__dict__ = d + self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) + self.sp_model.Load(self.vocab_file) + + @property + def vocab_size(self): + """Returns vocab size""" + return self.sp_model.get_piece_size() + + def get_vocab(self): + """Returns vocab as a dict""" + vocab = 
{self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} + vocab.update(self.added_tokens_encoder) + return vocab + + def _tokenize(self, text): + """Returns a tokenized string.""" + return self.sp_model.encode(text, out_type=str) + + def _convert_token_to_id(self, token): + """Converts a token (str) in an id using the vocab.""" + return self.sp_model.piece_to_id(token) + + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + token = self.sp_model.IdToPiece(index) + return token + + def convert_tokens_to_string(self, tokens): + """Converts a sequence of tokens (string) in a single string.""" + current_sub_tokens = [] + out_string = "" + prev_is_special = False + for i, token in enumerate(tokens): + # make sure that special tokens are not decoded using sentencepiece model + if token in self.all_special_tokens: + if not prev_is_special and i != 0: + out_string += " " + out_string += self.sp_model.decode(current_sub_tokens) + token + prev_is_special = True + current_sub_tokens = [] + else: + current_sub_tokens.append(token) + prev_is_special = False + out_string += self.sp_model.decode(current_sub_tokens) + return out_string + + def save_vocabulary( + self, save_directory, filename_prefix: Optional[str] = None + ) -> Tuple[str]: + """ + Save the vocabulary and special tokens file to a directory. + + Args: + save_directory (`str`): + The directory in which to save the vocabulary. + + Returns: + `Tuple(str)`: Paths to the files saved. + """ + if not os.path.isdir(save_directory): + logger.error(f"Vocabulary path ({save_directory}) should be a directory") + return + out_vocab_file = os.path.join( + save_directory, + (filename_prefix + "-" if filename_prefix else "") + + VOCAB_FILES_NAMES["vocab_file"], + ) + + if os.path.abspath(self.vocab_file) != os.path.abspath( + out_vocab_file + ) and os.path.isfile(self.vocab_file): + copyfile(self.vocab_file, out_vocab_file) + elif not os.path.isfile(self.vocab_file): + with open(out_vocab_file, "wb") as fi: + content_spiece_model = self.sp_model.serialized_model_proto() + fi.write(content_spiece_model) + + return (out_vocab_file,) + + def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): + bos_token_id = [self.bos_token_id] if self.add_bos_token else [] + eos_token_id = [self.eos_token_id] if self.add_eos_token else [] + + output = bos_token_id + token_ids_0 + eos_token_id + + if token_ids_1 is not None: + output = output + bos_token_id + token_ids_1 + eos_token_id + + return output + + def get_special_tokens_mask( + self, + token_ids_0: List[int], + token_ids_1: Optional[List[int]] = None, + already_has_special_tokens: bool = False, + ) -> List[int]: + """ + Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding + special tokens using the tokenizer `prepare_for_model` method. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + already_has_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not the token list is already formatted with special tokens for the model. + + Returns: + `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. 
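+
+        Example (illustrative sketch only; assumes a tokenizer constructed with the
+        class defaults `add_bos_token=True` and `add_eos_token=False`, and arbitrary
+        token ids):
+
+        ```python
+        >>> tokenizer.get_special_tokens_mask([42, 43, 44])
+        [1, 0, 0, 0]
+        ```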
+ """ + if already_has_special_tokens: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, + token_ids_1=token_ids_1, + already_has_special_tokens=True, + ) + + bos_token_id = [1] if self.add_bos_token else [] + eos_token_id = [1] if self.add_eos_token else [] + + if token_ids_1 is None: + return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id + return ( + bos_token_id + + ([0] * len(token_ids_0)) + + eos_token_id + + bos_token_id + + ([0] * len(token_ids_1)) + + eos_token_id + ) + + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT + sequence pair mask has the following format: + + ``` + 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 + | first sequence | second sequence | + ``` + + if token_ids_1 is None, only returns the first portion of the mask (0s). + + Args: + token_ids_0 (`List[int]`): + List of ids. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). + """ + bos_token_id = [self.bos_token_id] if self.add_bos_token else [] + eos_token_id = [self.eos_token_id] if self.add_eos_token else [] + + output = [0] * len(bos_token_id + token_ids_0 + eos_token_id) + + if token_ids_1 is not None: + output += [1] * len(bos_token_id + token_ids_1 + eos_token_id) + + return output diff --git a/tokenizer.model b/tokenizer.model new file mode 100644 index 0000000000000000000000000000000000000000..8306c8232dd5cff9e4674594e06981dedd8c1a03 --- /dev/null +++ b/tokenizer.model @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:386c49cf943d71aa110361135338c50e38beeff0a66593480421f37b319e1a39 +size 1033105 diff --git a/tokenizer_config.json b/tokenizer_config.json new file mode 100644 index 0000000000000000000000000000000000000000..8e78a421246ef986a54dd423ed6a7539d782aa65 --- /dev/null +++ b/tokenizer_config.json @@ -0,0 +1,46 @@ +{ + "add_bos_token": false, + "add_eos_token": false, + "added_tokens_decoder": { + "0": { + "content": "", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": true + }, + "1": { + "content": "<|startoftext|>", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": true + }, + "2": { + "content": "<|endoftext|>", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": true + } + }, + "auto_map": { + "AutoTokenizer": [ + "tokenization_yi.YiTokenizer", + null + ] + }, + "bos_token": "<|startoftext|>", + "clean_up_tokenization_spaces": false, + "eos_token": "<|endoftext|>", + "model_max_length": 4096, + "pad_token": "", + "padding_side": "left", + "sp_model_kwargs": {}, + "split_special_tokens": false, + "tokenizer_class": "YiTokenizer", + "unk_token": "" +}