Du Chen committed on
Commit
260bb10
1 Parent(s): 799b42f

initial commit

config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "architectures": [
+     "OrionForCausalLM"
+   ],
+   "auto_map": {
+     "AutoConfig": "configuration_orion.OrionConfig",
+     "AutoModelForCausalLM": "modeling_orion.OrionForCausalLM"
+   },
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 5120,
+   "model_type": "orion",
+   "initializer_range": 0.02,
+   "intermediate_size": 15360,
+   "max_position_embeddings": 4096,
+   "max_sequence_length": 4096,
+   "num_attention_heads": 40,
+   "num_hidden_layers": 40,
+   "num_key_value_heads": 40,
+   "pad_token_id": 0,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.34.0",
+   "use_cache": true,
+   "vocab_size": 84608
+ }
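The `auto_map` block above routes the generic Auto classes to the custom code shipped in this commit, which is why loading requires `trust_remote_code=True`. A minimal loading sketch; the repo path below is a placeholder (a local checkout or Hub repo id), not part of this commit:

import torch
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

repo = "path/to/this-repo"  # placeholder: local path or Hub repo id

# AutoConfig resolves "AutoConfig" in auto_map to configuration_orion.OrionConfig.
config = AutoConfig.from_pretrained(repo, trust_remote_code=True)
print(config.model_type, config.hidden_size)  # "orion" 5120

# AutoModelForCausalLM resolves to modeling_orion.OrionForCausalLM.
model = AutoModelForCausalLM.from_pretrained(
    repo, torch_dtype=torch.bfloat16, trust_remote_code=True
)
tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)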
configuration_orion.py ADDED
@@ -0,0 +1,82 @@
+ # Copyright (c) 2024, OrionStar Inc. All rights reserved.
+
+ from transformers import PretrainedConfig
+
+ class OrionConfig(PretrainedConfig):
+     model_type = "orion"
+     keys_to_ignore_at_inference = ["past_key_values"]
+
+     def __init__(
+         self,
+         vocab_size=84608,
+         hidden_size=4096,
+         intermediate_size=15360,
+         num_hidden_layers=40,
+         num_attention_heads=40,
+         num_key_value_heads=40,
+         hidden_act="silu",
+         max_position_embeddings=4096,
+         initializer_range=0.02,
+         rms_norm_eps=1e-5,
+         use_cache=True,
+         pad_token_id=None,
+         bos_token_id=1,
+         eos_token_id=2,
+         pretraining_tp=1,
+         tie_word_embeddings=False,
+         rope_theta=10000.0,
+         rope_scaling=None,
+         attention_bias=False,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+
+         # for backward compatibility
+         if num_key_value_heads is None:
+             num_key_value_heads = num_attention_heads
+
+         self.num_key_value_heads = num_key_value_heads
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.pretraining_tp = pretraining_tp
+         self.use_cache = use_cache
+         self.rope_theta = rope_theta
+         self.rope_scaling = rope_scaling
+         self._rope_scaling_validation()
+         self.attention_bias = attention_bias
+
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
+
+     def _rope_scaling_validation(self):
+         """
+         Validate the `rope_scaling` configuration.
+         """
+         if self.rope_scaling is None:
+             return
+
+         if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
+             raise ValueError(
+                 "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
+                 f"got {self.rope_scaling}"
+             )
+         rope_scaling_type = self.rope_scaling.get("type", None)
+         rope_scaling_factor = self.rope_scaling.get("factor", None)
+         if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
+             raise ValueError(
+                 f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
+             )
+         if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
+             raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
+
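`_rope_scaling_validation` accepts only a two-field dict with a known `type` and a float `factor` strictly greater than 1. A short illustration of the contract (standalone sketch, assuming `configuration_orion.py` is importable from the working directory):

from configuration_orion import OrionConfig

# Valid: exactly two fields, a supported type, and a float factor > 1.
cfg = OrionConfig(rope_scaling={"type": "linear", "factor": 2.0})
print(cfg.rope_scaling)  # {'type': 'linear', 'factor': 2.0}

# Invalid: an integer factor fails the isinstance(..., float) check and raises.
try:
    OrionConfig(rope_scaling={"type": "dynamic", "factor": 2})
except ValueError as err:
    print(err)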
generation_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "pad_token_id": 0,
+   "max_new_tokens": 1024,
+   "temperature": 0.3,
+   "top_k": 5,
+   "top_p": 0.90,
+   "repetition_penalty": 1.05,
+   "do_sample": true,
+   "transformers_version": "4.34.0"
+ }
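These sampling defaults are what `generate()` falls back to when no explicit generation config is passed. A quick sketch of inspecting and overriding them (the path is again a placeholder):

from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("path/to/this-repo")  # placeholder path
print(gen_cfg.temperature, gen_cfg.top_k, gen_cfg.top_p)  # 0.3 5 0.9

# Fields can be overridden per call, e.g. switch to greedy decoding:
gen_cfg.do_sample = False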
generation_utils.py ADDED
@@ -0,0 +1,56 @@
+ from typing import List
+ from queue import Queue
+
+ # build chat input prompt
+ def build_chat_input(tokenizer, messages: List[dict]):
+     # chat format:
+     # single-turn: <s>Human: Hello!\n\nAssistant: </s>
+     # multi-turn: <s>Human: Hello!\n\nAssistant: </s>Hi!</s>Human: How are you?\n\nAssistant: </s>I'm fine</s>
+
+     prompt = "<s>"
+     for msg in messages:
+         role = msg["role"]
+         message = msg["content"]
+         if message is None:
+             continue
+         if role == "user":
+             prompt += "Human: " + message + "\n\nAssistant: </s>"
+         if role == "assistant":
+             prompt += message + "</s>"
+
+     input_tokens = tokenizer.encode(prompt)
+     return input_tokens
+
+
+ class TextIterStreamer:
+     def __init__(self, tokenizer, skip_prompt=False, skip_special_tokens=False):
+         self.tokenizer = tokenizer
+         self.skip_prompt = skip_prompt
+         self.skip_special_tokens = skip_special_tokens
+         self.tokens = []
+         self.text_queue = Queue()
+         self.next_tokens_are_prompt = True
+
+     def put(self, value):
+         # called by `generate()` with each newly produced batch of token ids
+         if self.skip_prompt and self.next_tokens_are_prompt:
+             self.next_tokens_are_prompt = False
+         else:
+             if len(value.shape) > 1:
+                 value = value[0]
+             self.tokens.extend(value.tolist())
+             self.text_queue.put(
+                 self.tokenizer.decode(self.tokens, skip_special_tokens=self.skip_special_tokens))
+
+     def end(self):
+         # sentinel value that terminates iteration
+         self.text_queue.put(None)
+
+     def __iter__(self):
+         return self
+
+     def __next__(self):
+         value = self.text_queue.get()
+         if value is None:
+             raise StopIteration()
+         else:
+             return value
+
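For reference, a small sketch of how `build_chat_input` assembles the prompt string from a message list. The stub tokenizer below is illustration-only (not part of this commit) and simply records what it is asked to encode:

from generation_utils import build_chat_input

class StubTokenizer:
    """Illustration-only tokenizer: records the prompt instead of encoding it."""
    def encode(self, text):
        self.last_prompt = text
        return list(range(len(text)))  # dummy token ids

tok = StubTokenizer()
messages = [
    {"role": "user", "content": "Hello!"},
    {"role": "assistant", "content": "Hi!"},
    {"role": "user", "content": "How are you?"},
]
build_chat_input(tok, messages)
print(tok.last_prompt)
# <s>Human: Hello!\n\nAssistant: </s>Hi!</s>Human: How are you?\n\nAssistant: </s>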
modeling_orion.py ADDED
@@ -0,0 +1,1117 @@
+ # Copyright 2024 OrionStar Inc. team. All rights reserved.
+ # Copied and adapted from https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py
+
+ from transformers import AutoConfig, AutoModel
+
+ from .configuration_orion import OrionConfig
+
+ import numbers
+ import importlib
+ import math
+ from typing import List, Optional, Tuple, Union
+
+ import torch
+ import torch.nn.functional as F
+ from torch.nn.parameter import Parameter
+ import torch.utils.checkpoint
+ from torch import nn
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+ from torch.nn import init
+
+ from transformers.activations import ACT2FN
+ from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
+ from transformers.modeling_utils import PreTrainedModel
+ from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
+ from transformers.utils import (
+     add_start_docstrings,
+     add_start_docstrings_to_model_forward,
+     is_flash_attn_available,
+     logging,
+     replace_return_docstrings,
+ )
+ from .generation_utils import build_chat_input, TextIterStreamer
+ from transformers.generation.utils import GenerationConfig
+ from threading import Thread
+
+ if is_flash_attn_available():
+     from flash_attn import flash_attn_func, flash_attn_varlen_func
+     from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa
+
+ logger = logging.get_logger(__name__)
+
+ _CONFIG_FOR_DOC = "OrionConfig"
+
+
+ def _get_unpad_data(padding_mask):
+     seqlens_in_batch = padding_mask.sum(dim=-1, dtype=torch.int32)
+     indices = torch.nonzero(padding_mask.flatten(), as_tuple=False).flatten()
+     max_seqlen_in_batch = seqlens_in_batch.max().item()
+     cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
+     return (
+         indices,
+         cu_seqlens,
+         max_seqlen_in_batch,
+     )
+
+
+ # Copied from transformers.models.bart.modeling_bart._make_causal_mask
+ def _make_causal_mask(
+     input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
+ ):
+     """
+     Make causal mask used for bi-directional self-attention.
+     """
+     bsz, tgt_len = input_ids_shape
+     mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
+     mask_cond = torch.arange(mask.size(-1), device=device)
+     mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
+     mask = mask.to(dtype)
+
+     if past_key_values_length > 0:
+         mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
+     return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
+
+
+ # Copied from transformers.models.bart.modeling_bart._expand_mask
+ def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
+     """
+     Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
+     """
+     bsz, src_len = mask.size()
+     tgt_len = tgt_len if tgt_len is not None else src_len
+
+     expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
+
+     inverted_mask = 1.0 - expanded_mask
+
+     return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
+
+
+ class OrionRotaryEmbedding(nn.Module):
+     def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
+         super().__init__()
+
+         self.dim = dim
+         self.max_position_embeddings = max_position_embeddings
+         self.base = base
+         inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
+         self.register_buffer("inv_freq", inv_freq, persistent=False)
+
+         # Build here to make `torch.jit.trace` work.
+         self._set_cos_sin_cache(
+             seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
+         )
+
+     def _set_cos_sin_cache(self, seq_len, device, dtype):
+         self.max_seq_len_cached = seq_len
+         t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
+
+         freqs = torch.einsum("i,j->ij", t, self.inv_freq)
+         # Different from paper, but it uses a different permutation in order to obtain the same calculation
+         emb = torch.cat((freqs, freqs), dim=-1)
+         self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
+         self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
+
+     def forward(self, x, seq_len=None):
+         # x: [bs, num_attention_heads, seq_len, head_size]
+         if seq_len > self.max_seq_len_cached:
+             self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
+
+         return (
+             self.cos_cached[:seq_len].to(dtype=x.dtype),
+             self.sin_cached[:seq_len].to(dtype=x.dtype),
+         )
+
+
+ class OrionLinearScalingRotaryEmbedding(OrionRotaryEmbedding):
+     """OrionRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
+
+     def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
+         self.scaling_factor = scaling_factor
+         super().__init__(dim, max_position_embeddings, base, device)
+
+     def _set_cos_sin_cache(self, seq_len, device, dtype):
+         self.max_seq_len_cached = seq_len
+         t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
+         t = t / self.scaling_factor
+
+         freqs = torch.einsum("i,j->ij", t, self.inv_freq)
+         # Different from paper, but it uses a different permutation in order to obtain the same calculation
+         emb = torch.cat((freqs, freqs), dim=-1)
+         self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
+         self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
+
+
+ class OrionDynamicNTKScalingRotaryEmbedding(OrionRotaryEmbedding):
+     """OrionRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla"""
+
+     def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
+         self.scaling_factor = scaling_factor
+         super().__init__(dim, max_position_embeddings, base, device)
+
+     def _set_cos_sin_cache(self, seq_len, device, dtype):
+         self.max_seq_len_cached = seq_len
+
+         if seq_len > self.max_position_embeddings:
+             base = self.base * (
+                 (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
+             ) ** (self.dim / (self.dim - 2))
+             inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
+             self.register_buffer("inv_freq", inv_freq, persistent=False)
+
+         t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
+
+         freqs = torch.einsum("i,j->ij", t, self.inv_freq)
+         # Different from paper, but it uses a different permutation in order to obtain the same calculation
+         emb = torch.cat((freqs, freqs), dim=-1)
+         self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
+         self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
+
+
+ def rotate_half(x):
+     """Rotates half the hidden dims of the input."""
+     x1 = x[..., : x.shape[-1] // 2]
+     x2 = x[..., x.shape[-1] // 2 :]
+     return torch.cat((-x2, x1), dim=-1)
+
+
+ # Copied from transformers.models.gpt_neox.modeling_gpt_neox.apply_rotary_pos_emb
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
+     cos = cos[position_ids].unsqueeze(1)  # [seq_len, dim] -> [batch_size, 1, seq_len, head_dim]
+     sin = sin[position_ids].unsqueeze(1)
+     q_embed = (q * cos) + (rotate_half(q) * sin)
+     k_embed = (k * cos) + (rotate_half(k) * sin)
+     return q_embed, k_embed
+
+
+ class OrionMLP(nn.Module):
+     def __init__(self, config):
+         super().__init__()
+         self.config = config
+         self.hidden_size = config.hidden_size
+         self.intermediate_size = config.intermediate_size
+         self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+         self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+         self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
+         self.act_fn = ACT2FN[config.hidden_act]
+
+     def forward(self, x):
+         if self.config.pretraining_tp > 1:
+             slice = self.intermediate_size // self.config.pretraining_tp
+             gate_proj_slices = self.gate_proj.weight.split(slice, dim=0)
+             up_proj_slices = self.up_proj.weight.split(slice, dim=0)
+             down_proj_slices = self.down_proj.weight.split(slice, dim=1)
+
+             gate_proj = torch.cat(
+                 [F.linear(x, gate_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1
+             )
+             up_proj = torch.cat([F.linear(x, up_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1)
+
+             intermediate_states = (self.act_fn(gate_proj) * up_proj).split(slice, dim=2)
+             down_proj = [
+                 F.linear(intermediate_states[i], down_proj_slices[i]) for i in range(self.config.pretraining_tp)
+             ]
+             down_proj = sum(down_proj)
+         else:
+             down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
+
+         return down_proj
+
+
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
+     """
+     This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
+     num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
+     """
+     batch, num_key_value_heads, slen, head_dim = hidden_states.shape
+     if n_rep == 1:
+         return hidden_states
+     hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
+     return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
+
+
+ class OrionAttention(nn.Module):
+     """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+     def __init__(self, config: OrionConfig):
+         super().__init__()
+         self.config = config
+         self.hidden_size = config.hidden_size
+         self.num_heads = config.num_attention_heads
+         self.head_dim = self.hidden_size // self.num_heads
+         self.num_key_value_heads = config.num_key_value_heads
+         self.num_key_value_groups = self.num_heads // self.num_key_value_heads
+         self.max_position_embeddings = config.max_position_embeddings
+         self.rope_theta = config.rope_theta
+
+         if (self.head_dim * self.num_heads) != self.hidden_size:
+             raise ValueError(
+                 f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
+                 f" and `num_heads`: {self.num_heads})."
+             )
+         self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
+         self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
+         self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
+         self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.attention_bias)
+         self._init_rope()
+
+     def _init_rope(self):
+         if self.config.rope_scaling is None:
+             self.rotary_emb = OrionRotaryEmbedding(
+                 self.head_dim,
+                 max_position_embeddings=self.max_position_embeddings,
+                 base=self.rope_theta,
+             )
+         else:
+             scaling_type = self.config.rope_scaling["type"]
+             scaling_factor = self.config.rope_scaling["factor"]
+             if scaling_type == "linear":
+                 self.rotary_emb = OrionLinearScalingRotaryEmbedding(
+                     self.head_dim,
+                     max_position_embeddings=self.max_position_embeddings,
+                     scaling_factor=scaling_factor,
+                     base=self.rope_theta,
+                 )
+             elif scaling_type == "dynamic":
+                 self.rotary_emb = OrionDynamicNTKScalingRotaryEmbedding(
+                     self.head_dim,
+                     max_position_embeddings=self.max_position_embeddings,
+                     scaling_factor=scaling_factor,
+                     base=self.rope_theta,
+                 )
+             else:
+                 raise ValueError(f"Unknown RoPE scaling type {scaling_type}")
+
+     def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+         return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_value: Optional[Tuple[torch.Tensor]] = None,
+         output_attentions: bool = False,
+         use_cache: bool = False,
+         padding_mask: Optional[torch.LongTensor] = None,
+     ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+         bsz, q_len, _ = hidden_states.size()
+
+         if self.config.pretraining_tp > 1:
+             key_value_slicing = (self.num_key_value_heads * self.head_dim) // self.config.pretraining_tp
+             query_slices = self.q_proj.weight.split(
+                 (self.num_heads * self.head_dim) // self.config.pretraining_tp, dim=0
+             )
+             key_slices = self.k_proj.weight.split(key_value_slicing, dim=0)
+             value_slices = self.v_proj.weight.split(key_value_slicing, dim=0)
+
+             query_states = [F.linear(hidden_states, query_slices[i]) for i in range(self.config.pretraining_tp)]
+             query_states = torch.cat(query_states, dim=-1)
+
+             key_states = [F.linear(hidden_states, key_slices[i]) for i in range(self.config.pretraining_tp)]
+             key_states = torch.cat(key_states, dim=-1)
+
+             value_states = [F.linear(hidden_states, value_slices[i]) for i in range(self.config.pretraining_tp)]
+             value_states = torch.cat(value_states, dim=-1)
+
+         else:
+             query_states = self.q_proj(hidden_states)
+             key_states = self.k_proj(hidden_states)
+             value_states = self.v_proj(hidden_states)
+
+         query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+         key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+         value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+         kv_seq_len = key_states.shape[-2]
+         if past_key_value is not None:
+             kv_seq_len += past_key_value[0].shape[-2]
+         cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
+         query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
+
+         if past_key_value is not None:
+             # reuse k, v, self_attention
+             key_states = torch.cat([past_key_value[0], key_states], dim=2)
+             value_states = torch.cat([past_key_value[1], value_states], dim=2)
+
+         past_key_value = (key_states, value_states) if use_cache else None
+
+         key_states = repeat_kv(key_states, self.num_key_value_groups)
+         value_states = repeat_kv(value_states, self.num_key_value_groups)
+
+         attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
+
+         if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
+             raise ValueError(
+                 f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
+                 f" {attn_weights.size()}"
+             )
+
+         if attention_mask is not None:
+             if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
+                 raise ValueError(
+                     f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
+                 )
+             attn_weights = attn_weights + attention_mask
+
+         # upcast attention to fp32
+         attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
+         attn_output = torch.matmul(attn_weights, value_states)
+
+         if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
+             raise ValueError(
+                 f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
+                 f" {attn_output.size()}"
+             )
+
+         attn_output = attn_output.transpose(1, 2).contiguous()
+
+         attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
+
+         if self.config.pretraining_tp > 1:
+             attn_output = attn_output.split(self.hidden_size // self.config.pretraining_tp, dim=2)
+             o_proj_slices = self.o_proj.weight.split(self.hidden_size // self.config.pretraining_tp, dim=1)
+             attn_output = sum([F.linear(attn_output[i], o_proj_slices[i]) for i in range(self.config.pretraining_tp)])
+         else:
+             attn_output = self.o_proj(attn_output)
+
+         if not output_attentions:
+             attn_weights = None
+
+         return attn_output, attn_weights, past_key_value
+
+
+ class OrionFlashAttention2(OrionAttention):
+     """
+     Orion flash attention module. This module inherits from `OrionAttention` as the weights of the module stay
+     untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
+     flash attention and deal with padding tokens in case the input contains any of them.
+     """
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_value: Optional[Tuple[torch.Tensor]] = None,
+         output_attentions: bool = False,
+         use_cache: bool = False,
+         padding_mask: Optional[torch.LongTensor] = None,
+     ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+         # OrionFlashAttention2 attention does not support output_attentions
+         output_attentions = False
+
+         bsz, q_len, _ = hidden_states.size()
+
+         query_states = self.q_proj(hidden_states)
+         key_states = self.k_proj(hidden_states)
+         value_states = self.v_proj(hidden_states)
+
+         # Flash attention requires the input to have the shape
+         # batch_size x seq_length x head_dim x hidden_dim
+         # therefore we just need to keep the original shape
+         query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+         key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+         value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+         kv_seq_len = key_states.shape[-2]
+         if past_key_value is not None:
+             kv_seq_len += past_key_value[0].shape[-2]
+
+         cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
+
+         query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
+
+         if past_key_value is not None:
+             # reuse k, v, self_attention
+             key_states = torch.cat([past_key_value[0], key_states], dim=2)
+             value_states = torch.cat([past_key_value[1], value_states], dim=2)
+
+         past_key_value = (key_states, value_states) if use_cache else None
+
+         query_states = query_states.transpose(1, 2)
+         key_states = key_states.transpose(1, 2)
+         value_states = value_states.transpose(1, 2)
+
+         # TODO: llama does not have dropout in the config??
+         # It is recommended to use dropout with FA according to the docs
+         # when training.
+         dropout_rate = 0.0  # if not self.training else self.attn_dropout
+
+         # In PEFT, usually we cast the layer norms in float32 for training stability reasons,
+         # therefore the input hidden states get silently cast to float32. Hence, we need
+         # to cast them back to float16 just to be sure everything works as expected.
+         # This might slow down training & inference so it is recommended to not cast the LayerNorms
+         # to fp32. (LlamaRMSNorm handles it correctly)
+         input_dtype = query_states.dtype
+         if input_dtype == torch.float32:
+             logger.warning_once(
+                 "The input hidden states seem to be silently cast to float32; this might be related to"
+                 " the fact you have upcast embedding or layer norm layers to float32. We will cast back the input to"
+                 " float16."
+             )
+
+             query_states = query_states.to(torch.float16)
+             key_states = key_states.to(torch.float16)
+             value_states = value_states.to(torch.float16)
+
+         attn_output = self._flash_attention_forward(
+             query_states, key_states, value_states, padding_mask, q_len, dropout=dropout_rate
+         )
+
+         attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
+         attn_output = self.o_proj(attn_output)
+
+         if not output_attentions:
+             attn_weights = None
+
+         return attn_output, attn_weights, past_key_value
+
+     def _flash_attention_forward(
+         self, query_states, key_states, value_states, padding_mask, query_length, dropout=0.0, softmax_scale=None
+     ):
+         """
+         Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
+         first unpad the input, then compute the attention scores and pad the final attention scores.
+
+         Args:
+             query_states (`torch.Tensor`):
+                 Input query states to be passed to Flash Attention API
+             key_states (`torch.Tensor`):
+                 Input key states to be passed to Flash Attention API
+             value_states (`torch.Tensor`):
+                 Input value states to be passed to Flash Attention API
+             padding_mask (`torch.Tensor`):
+                 The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
+                 position of padding tokens and 1 for the position of non-padding tokens.
+             dropout (`int`, *optional*):
+                 Attention dropout
+             softmax_scale (`float`, *optional*):
+                 The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
+         """
+         # Contains at least one padding token in the sequence
+         if padding_mask is not None:
+             batch_size = query_states.shape[0]
+             query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
+                 query_states, key_states, value_states, padding_mask, query_length
+             )
+
+             cu_seqlens_q, cu_seqlens_k = cu_seq_lens
+             max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
+
+             attn_output_unpad = flash_attn_varlen_func(
+                 query_states,
+                 key_states,
+                 value_states,
+                 cu_seqlens_q=cu_seqlens_q,
+                 cu_seqlens_k=cu_seqlens_k,
+                 max_seqlen_q=max_seqlen_in_batch_q,
+                 max_seqlen_k=max_seqlen_in_batch_k,
+                 dropout_p=dropout,
+                 softmax_scale=softmax_scale,
+                 causal=True,
+             )
+
+             attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
+         else:
+             attn_output = flash_attn_func(
+                 query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=True
+             )
+
+         return attn_output
+
+     def _upad_input(self, query_layer, key_layer, value_layer, padding_mask, query_length):
+         indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(padding_mask)
+         batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
+
+         key_layer = index_first_axis(
+             key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
+         )
+         value_layer = index_first_axis(
+             value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
+         )
+         if query_length == kv_seq_len:
+             query_layer = index_first_axis(
+                 query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
+             )
+             cu_seqlens_q = cu_seqlens_k
+             max_seqlen_in_batch_q = max_seqlen_in_batch_k
+             indices_q = indices_k
+         elif query_length == 1:
+             max_seqlen_in_batch_q = 1
+             cu_seqlens_q = torch.arange(
+                 batch_size + 1, dtype=torch.int32, device=query_layer.device
+             )  # There is a memcpy here, that is very bad.
+             indices_q = cu_seqlens_q[:-1]
+             query_layer = query_layer.squeeze(1)
+         else:
+             # The -q_len: slice assumes left padding.
+             padding_mask = padding_mask[:, -query_length:]
+             query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, padding_mask)
+
+         return (
+             query_layer,
+             key_layer,
+             value_layer,
+             indices_q,
+             (cu_seqlens_q, cu_seqlens_k),
+             (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
+         )
+
+
+ class OrionDecoderLayer(nn.Module):
+     def __init__(self, config: OrionConfig):
+         super().__init__()
+         self.hidden_size = config.hidden_size
+         self.self_attn = (
+             OrionAttention(config=config)
+             if not getattr(config, "_flash_attn_2_enabled", False)
+             else OrionFlashAttention2(config=config)
+         )
+         self.mlp = OrionMLP(config)
+         self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.rms_norm_eps)
+         self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_value: Optional[Tuple[torch.Tensor]] = None,
+         output_attentions: Optional[bool] = False,
+         use_cache: Optional[bool] = False,
+         padding_mask: Optional[torch.LongTensor] = None,
+     ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+         """
+         Args:
+             hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+             attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
+                 `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+             output_attentions (`bool`, *optional*):
+                 Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+                 returned tensors for more detail.
+             use_cache (`bool`, *optional*):
+                 If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+                 (see `past_key_values`).
+             past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
+         """
+
+         residual = hidden_states
+
+         hidden_states = self.input_layernorm(hidden_states)
+
+         # Self Attention
+         hidden_states, self_attn_weights, present_key_value = self.self_attn(
+             hidden_states=hidden_states,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_value=past_key_value,
+             output_attentions=output_attentions,
+             use_cache=use_cache,
+             padding_mask=padding_mask,
+         )
+         hidden_states = residual + hidden_states
+
+         # Fully Connected
+         residual = hidden_states
+         hidden_states = self.post_attention_layernorm(hidden_states)
+         hidden_states = self.mlp(hidden_states)
+         hidden_states = residual + hidden_states
+
+         outputs = (hidden_states,)
+
+         if output_attentions:
+             outputs += (self_attn_weights,)
+
+         if use_cache:
+             outputs += (present_key_value,)
+
+         return outputs
+
+
+ class OrionPreTrainedModel(PreTrainedModel):
+     config_class = OrionConfig
+     base_model_prefix = "model"
+     supports_gradient_checkpointing = True
+     _no_split_modules = ["OrionDecoderLayer"]
+     _skip_keys_device_placement = "past_key_values"
+     _supports_flash_attn_2 = True
+
+     def _init_weights(self, module):
+         std = self.config.initializer_range
+         if isinstance(module, nn.Linear):
+             module.weight.data.normal_(mean=0.0, std=std)
+             if module.bias is not None:
+                 module.bias.data.zero_()
+         elif isinstance(module, nn.Embedding):
+             module.weight.data.normal_(mean=0.0, std=std)
+             if module.padding_idx is not None:
+                 module.weight.data[module.padding_idx].zero_()
+
+     def _set_gradient_checkpointing(self, module, value=False):
+         if isinstance(module, OrionModel):
+             module.gradient_checkpointing = value
+
+
+ class OrionModel(OrionPreTrainedModel):
+     """
+     Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is an [`OrionDecoderLayer`]
+
+     Args:
+         config: OrionConfig
+     """
+
+     def __init__(self, config: OrionConfig):
+         super().__init__(config)
+         self.padding_idx = config.pad_token_id
+         self.vocab_size = config.vocab_size
+
+         self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
+         self.layers = nn.ModuleList([OrionDecoderLayer(config) for _ in range(config.num_hidden_layers)])
+         self.norm = nn.LayerNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+         self.gradient_checkpointing = False
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.embed_tokens
+
+     def set_input_embeddings(self, value):
+         self.embed_tokens = value
+
+     # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
+     def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
+         # create causal mask
+         # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+         combined_attention_mask = None
+         if input_shape[-1] > 1:
+             combined_attention_mask = _make_causal_mask(
+                 input_shape,
+                 inputs_embeds.dtype,
+                 device=inputs_embeds.device,
+                 past_key_values_length=past_key_values_length,
+             )
+
+         if attention_mask is not None:
+             # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+             expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
+                 inputs_embeds.device
+             )
+             combined_attention_mask = (
+                 expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
+             )
+
+         return combined_attention_mask
+
+     def forward(
+         self,
+         input_ids: torch.LongTensor = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[List[torch.FloatTensor]] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[Tuple, BaseModelOutputWithPast]:
+         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+         use_cache = use_cache if use_cache is not None else self.config.use_cache
+
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         # retrieve input_ids and inputs_embeds
+         if input_ids is not None and inputs_embeds is not None:
+             raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+         elif input_ids is not None:
+             batch_size, seq_length = input_ids.shape
+         elif inputs_embeds is not None:
+             batch_size, seq_length, _ = inputs_embeds.shape
+         else:
+             raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+         seq_length_with_past = seq_length
+         past_key_values_length = 0
+
+         if past_key_values is not None:
+             past_key_values_length = past_key_values[0][0].shape[2]
+             seq_length_with_past = seq_length_with_past + past_key_values_length
+
+         if position_ids is None:
+             device = input_ids.device if input_ids is not None else inputs_embeds.device
+             position_ids = torch.arange(
+                 past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
+             )
+             position_ids = position_ids.unsqueeze(0)
+
+         if inputs_embeds is None:
+             inputs_embeds = self.embed_tokens(input_ids)
+         # embed positions
+         if attention_mask is None:
+             attention_mask = torch.ones(
+                 (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
+             )
+             padding_mask = None
+         else:
+             if 0 in attention_mask:
+                 padding_mask = attention_mask
+             else:
+                 padding_mask = None
+
+         attention_mask = self._prepare_decoder_attention_mask(
+             attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
+         )
+
+         hidden_states = inputs_embeds
+
+         if self.gradient_checkpointing and self.training:
+             if use_cache:
+                 logger.warning_once(
+                     "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+                 )
+                 use_cache = False
+
+         # decoder layers
+         all_hidden_states = () if output_hidden_states else None
+         all_self_attns = () if output_attentions else None
+         next_decoder_cache = () if use_cache else None
+
+         for idx, decoder_layer in enumerate(self.layers):
+             if output_hidden_states:
+                 all_hidden_states += (hidden_states,)
+
+             past_key_value = past_key_values[idx] if past_key_values is not None else None
+
+             if self.gradient_checkpointing and self.training:
+
+                 def create_custom_forward(module):
+                     def custom_forward(*inputs):
+                         # None for past_key_value
+                         return module(*inputs, past_key_value, output_attentions, padding_mask=padding_mask)
+
+                     return custom_forward
+
+                 layer_outputs = torch.utils.checkpoint.checkpoint(
+                     create_custom_forward(decoder_layer), hidden_states, attention_mask, position_ids
+                 )
+             else:
+                 layer_outputs = decoder_layer(
+                     hidden_states,
+                     attention_mask=attention_mask,
+                     position_ids=position_ids,
+                     past_key_value=past_key_value,
+                     output_attentions=output_attentions,
+                     use_cache=use_cache,
+                     padding_mask=padding_mask,
+                 )
+
+             hidden_states = layer_outputs[0]
+
+             if use_cache:
+                 next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
+
+             if output_attentions:
+                 all_self_attns += (layer_outputs[1],)
+
+         hidden_states = self.norm(hidden_states)
+
+         # add hidden states from the last decoder layer
+         if output_hidden_states:
+             all_hidden_states += (hidden_states,)
+
+         next_cache = next_decoder_cache if use_cache else None
+         if not return_dict:
+             return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
+         return BaseModelOutputWithPast(
+             last_hidden_state=hidden_states,
+             past_key_values=next_cache,
+             hidden_states=all_hidden_states,
+             attentions=all_self_attns,
+         )
+
+
+ class OrionForCausalLM(OrionPreTrainedModel):
+     model_type = "orion"
+     _tied_weights_keys = ["lm_head.weight"]
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.model = OrionModel(config)
+         self.vocab_size = config.vocab_size
+         self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.model.embed_tokens
+
+     def set_input_embeddings(self, value):
+         self.model.embed_tokens = value
+
+     def get_output_embeddings(self):
+         return self.lm_head
+
+     def set_output_embeddings(self, new_embeddings):
+         self.lm_head = new_embeddings
+
+     def set_decoder(self, decoder):
+         self.model = decoder
+
+     def get_decoder(self):
+         return self.model
+
+     @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
+     def forward(
+         self,
+         input_ids: torch.LongTensor = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[List[torch.FloatTensor]] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         labels: Optional[torch.LongTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[Tuple, CausalLMOutputWithPast]:
+         r"""
+         Args:
+             labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+                 Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+                 config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+                 (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+         Returns:
+
+         Example:
+
+         ```python
+         >>> from transformers import AutoTokenizer, OrionForCausalLM
+
+         >>> model = OrionForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
+         >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
+
+         >>> prompt = "Hey, are you conscious? Can you talk to me?"
+         >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+         >>> # Generate
+         >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+         >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+         "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
+         ```"""
+
+         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+         outputs = self.model(
+             input_ids=input_ids,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_values=past_key_values,
+             inputs_embeds=inputs_embeds,
+             use_cache=use_cache,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+         )
+
+         hidden_states = outputs[0]
+         if self.config.pretraining_tp > 1:
+             lm_head_slices = self.lm_head.weight.split(self.vocab_size // self.config.pretraining_tp, dim=0)
+             logits = [F.linear(hidden_states, lm_head_slices[i]) for i in range(self.config.pretraining_tp)]
+             logits = torch.cat(logits, dim=-1)
+         else:
+             logits = self.lm_head(hidden_states)
+         logits = logits.float()
+
+         loss = None
+         if labels is not None:
+             # Shift so that tokens < n predict n
+             shift_logits = logits[..., :-1, :].contiguous()
+             shift_labels = labels[..., 1:].contiguous()
+             # Flatten the tokens
+             loss_fct = CrossEntropyLoss()
+             shift_logits = shift_logits.view(-1, self.config.vocab_size)
+             shift_labels = shift_labels.view(-1)
+             # Enable model parallelism
+             shift_labels = shift_labels.to(shift_logits.device)
+             loss = loss_fct(shift_logits, shift_labels)
+
+         if not return_dict:
+             output = (logits,) + outputs[1:]
+             return (loss,) + output if loss is not None else output
+
+         return CausalLMOutputWithPast(
+             loss=loss,
+             logits=logits,
+             past_key_values=outputs.past_key_values,
+             hidden_states=outputs.hidden_states,
+             attentions=outputs.attentions,
+         )
+
+     def chat(self, tokenizer, messages: List[dict], streaming=False, generation_config: Optional[GenerationConfig] = None):
+         generation_config = generation_config or self.generation_config
+         input_tokens = build_chat_input(tokenizer, messages)
+         input_ids = torch.LongTensor([input_tokens]).to(self.device)
+
+         if streaming:
+             streamer = TextIterStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
+             Thread(target=self.generate, kwargs=dict(
+                 inputs=input_ids, streamer=streamer,
+                 generation_config=generation_config,
+             )).start()
+             return streamer
+         else:
+             outputs = self.generate(input_ids, generation_config=generation_config)
+             response = tokenizer.decode(outputs[0][len(input_ids[0]):], skip_special_tokens=True)
+             return response
+
+     def prepare_inputs_for_generation(
+         self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
+     ):
+         if past_key_values:
+             input_ids = input_ids[:, -1:]
+
+         position_ids = kwargs.get("position_ids", None)
+         if attention_mask is not None and position_ids is None:
+             # create position_ids on the fly for batch generation
+             position_ids = attention_mask.long().cumsum(-1) - 1
+             position_ids.masked_fill_(attention_mask == 0, 1)
+             if past_key_values:
+                 position_ids = position_ids[:, -1].unsqueeze(-1)
+
+         # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+         if inputs_embeds is not None and past_key_values is None:
+             model_inputs = {"inputs_embeds": inputs_embeds}
+         else:
+             model_inputs = {"input_ids": input_ids}
+
+         model_inputs.update(
+             {
+                 "position_ids": position_ids,
+                 "past_key_values": past_key_values,
+                 "use_cache": kwargs.get("use_cache"),
+                 "attention_mask": attention_mask,
+             }
+         )
+         return model_inputs
+
+     @staticmethod
+     def _reorder_cache(past_key_values, beam_idx):
+         reordered_past = ()
+         for layer_past in past_key_values:
+             reordered_past += (
+                 tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+             )
+         return reordered_past
+
+
+ class OrionForSequenceClassification(OrionPreTrainedModel):
+     def __init__(self, config):
+         super().__init__(config)
+         self.num_labels = config.num_labels
+         self.model = OrionModel(config)
+         self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.model.embed_tokens
+
+     def set_input_embeddings(self, value):
+         self.model.embed_tokens = value
+
+     def forward(
+         self,
+         input_ids: torch.LongTensor = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[List[torch.FloatTensor]] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         labels: Optional[torch.LongTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
+         r"""
+         labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+             Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+             config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+             `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+         """
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         transformer_outputs = self.model(
+             input_ids,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_values=past_key_values,
+             inputs_embeds=inputs_embeds,
+             use_cache=use_cache,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+         )
+         hidden_states = transformer_outputs[0]
+         logits = self.score(hidden_states)
+
+         if input_ids is not None:
+             batch_size = input_ids.shape[0]
+         else:
+             batch_size = inputs_embeds.shape[0]
+
+         if self.config.pad_token_id is None and batch_size != 1:
+             raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
+         if self.config.pad_token_id is None:
+             sequence_lengths = -1
+         else:
+             if input_ids is not None:
+                 sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).long().argmax(-1) - 1).to(
+                     logits.device
+                 )
+             else:
+                 sequence_lengths = -1
+
+         pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
+
+         loss = None
+         if labels is not None:
+             labels = labels.to(logits.device)
+             if self.config.problem_type is None:
+                 if self.num_labels == 1:
+                     self.config.problem_type = "regression"
+                 elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+                     self.config.problem_type = "single_label_classification"
+                 else:
+                     self.config.problem_type = "multi_label_classification"
+
+             if self.config.problem_type == "regression":
+                 loss_fct = MSELoss()
+                 if self.num_labels == 1:
+                     loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
+                 else:
+                     loss = loss_fct(pooled_logits, labels)
+             elif self.config.problem_type == "single_label_classification":
+                 loss_fct = CrossEntropyLoss()
+                 loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
+             elif self.config.problem_type == "multi_label_classification":
+                 loss_fct = BCEWithLogitsLoss()
+                 loss = loss_fct(pooled_logits, labels)
+         if not return_dict:
+             output = (pooled_logits,) + transformer_outputs[1:]
+             return ((loss,) + output) if loss is not None else output
+
+         return SequenceClassifierOutputWithPast(
+             loss=loss,
+             logits=pooled_logits,
+             past_key_values=transformer_outputs.past_key_values,
+             hidden_states=transformer_outputs.hidden_states,
+             attentions=transformer_outputs.attentions,
+         )
+
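The `chat` helper above wraps `build_chat_input` and `generate`, with an optional streaming path that runs generation on a background thread. A minimal usage sketch; the repo path is a placeholder, and `trust_remote_code=True` is needed so the Auto classes load this custom code:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "path/to/this-repo"  # placeholder
tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    repo, torch_dtype=torch.bfloat16, device_map="auto", trust_remote_code=True
)

messages = [{"role": "user", "content": "Hello, what can you do?"}]

# Blocking call: returns the decoded assistant reply.
print(model.chat(tokenizer, messages))

# Streaming call: returns a TextIterStreamer; each iteration yields the
# cumulative decoded text so far, so overwrite the line when printing.
for text in model.chat(tokenizer, messages, streaming=True):
    print(text, end="\r")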
pytorch_model-00001-of-00003.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0d5bf5356a58377bb9a246e1c2b54e3b3f11316d44989eaf2315f11597cb8d8b
+ size 9937152090
pytorch_model-00002-of-00003.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2d15c2a9baa881f4c4bfacd5bd1237af782e262af7f3d1a8ff25032b201a9db1
+ size 9857241994
pytorch_model-00003-of-00003.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f3b8d53ea113be45bb1f6ed38db438e271c5c2579aa660b04c27ecd3dadc1d8f
+ size 9203166530
pytorch_model.bin.index.json ADDED
@@ -0,0 +1,451 @@
+ {
+   "metadata": {
+     "total_size": 28997406720
+   },
+   "weight_map": {
+     "lm_head.weight": "pytorch_model-00003-of-00003.bin",
+     "model.embed_tokens.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.0.input_layernorm.bias": "pytorch_model-00001-of-00003.bin",
+     "model.layers.0.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.0.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.0.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.0.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.0.post_attention_layernorm.bias": "pytorch_model-00001-of-00003.bin",
+     "model.layers.0.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.0.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.0.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.0.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.0.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.1.input_layernorm.bias": "pytorch_model-00001-of-00003.bin",
+     "model.layers.1.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.1.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.1.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.1.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.1.post_attention_layernorm.bias": "pytorch_model-00001-of-00003.bin",
+     "model.layers.1.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.1.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.1.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.1.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.1.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.10.input_layernorm.bias": "pytorch_model-00001-of-00003.bin",
+     "model.layers.10.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.10.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.10.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.10.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.10.post_attention_layernorm.bias": "pytorch_model-00001-of-00003.bin",
+     "model.layers.10.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.10.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.10.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.10.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.10.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.11.input_layernorm.bias": "pytorch_model-00001-of-00003.bin",
+     "model.layers.11.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.11.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.11.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.11.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.11.post_attention_layernorm.bias": "pytorch_model-00001-of-00003.bin",
+     "model.layers.11.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.11.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.11.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.11.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
51
+ "model.layers.11.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
52
+ "model.layers.12.input_layernorm.bias": "pytorch_model-00001-of-00003.bin",
53
+ "model.layers.12.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
54
+ "model.layers.12.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
55
+ "model.layers.12.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
56
+ "model.layers.12.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
57
+ "model.layers.12.post_attention_layernorm.bias": "pytorch_model-00001-of-00003.bin",
58
+ "model.layers.12.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
59
+ "model.layers.12.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
60
+ "model.layers.12.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
61
+ "model.layers.12.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
62
+ "model.layers.12.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
63
+ "model.layers.13.input_layernorm.bias": "pytorch_model-00002-of-00003.bin",
64
+ "model.layers.13.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
65
+ "model.layers.13.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
66
+ "model.layers.13.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
67
+ "model.layers.13.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
68
+ "model.layers.13.post_attention_layernorm.bias": "pytorch_model-00002-of-00003.bin",
69
+ "model.layers.13.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
70
+ "model.layers.13.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
71
+ "model.layers.13.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
72
+ "model.layers.13.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
73
+ "model.layers.13.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
74
+ "model.layers.14.input_layernorm.bias": "pytorch_model-00002-of-00003.bin",
75
+ "model.layers.14.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
76
+ "model.layers.14.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
77
+ "model.layers.14.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
78
+ "model.layers.14.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
79
+ "model.layers.14.post_attention_layernorm.bias": "pytorch_model-00002-of-00003.bin",
80
+ "model.layers.14.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
81
+ "model.layers.14.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
82
+ "model.layers.14.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
83
+ "model.layers.14.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
84
+ "model.layers.14.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
85
+ "model.layers.15.input_layernorm.bias": "pytorch_model-00002-of-00003.bin",
86
+ "model.layers.15.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
87
+ "model.layers.15.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
88
+ "model.layers.15.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
89
+ "model.layers.15.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
90
+ "model.layers.15.post_attention_layernorm.bias": "pytorch_model-00002-of-00003.bin",
91
+ "model.layers.15.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
92
+ "model.layers.15.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
93
+ "model.layers.15.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
94
+ "model.layers.15.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
95
+ "model.layers.15.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
96
+ "model.layers.16.input_layernorm.bias": "pytorch_model-00002-of-00003.bin",
97
+ "model.layers.16.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
98
+ "model.layers.16.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
99
+ "model.layers.16.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
100
+ "model.layers.16.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
101
+ "model.layers.16.post_attention_layernorm.bias": "pytorch_model-00002-of-00003.bin",
102
+ "model.layers.16.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
103
+ "model.layers.16.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
104
+ "model.layers.16.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
105
+ "model.layers.16.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
106
+ "model.layers.16.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
107
+ "model.layers.17.input_layernorm.bias": "pytorch_model-00002-of-00003.bin",
108
+ "model.layers.17.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
109
+ "model.layers.17.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
110
+ "model.layers.17.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
111
+ "model.layers.17.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
112
+ "model.layers.17.post_attention_layernorm.bias": "pytorch_model-00002-of-00003.bin",
113
+ "model.layers.17.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
114
+ "model.layers.17.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
115
+ "model.layers.17.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
116
+ "model.layers.17.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
117
+ "model.layers.17.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
118
+ "model.layers.18.input_layernorm.bias": "pytorch_model-00002-of-00003.bin",
119
+ "model.layers.18.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
120
+ "model.layers.18.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
121
+ "model.layers.18.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
122
+ "model.layers.18.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
123
+ "model.layers.18.post_attention_layernorm.bias": "pytorch_model-00002-of-00003.bin",
124
+ "model.layers.18.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
125
+ "model.layers.18.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
126
+ "model.layers.18.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
127
+ "model.layers.18.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
128
+ "model.layers.18.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
129
+ "model.layers.19.input_layernorm.bias": "pytorch_model-00002-of-00003.bin",
130
+ "model.layers.19.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
131
+ "model.layers.19.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
132
+ "model.layers.19.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
133
+ "model.layers.19.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
134
+ "model.layers.19.post_attention_layernorm.bias": "pytorch_model-00002-of-00003.bin",
135
+ "model.layers.19.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
136
+ "model.layers.19.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
137
+ "model.layers.19.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
138
+ "model.layers.19.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
139
+ "model.layers.19.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
140
+ "model.layers.2.input_layernorm.bias": "pytorch_model-00001-of-00003.bin",
141
+ "model.layers.2.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
142
+ "model.layers.2.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
143
+ "model.layers.2.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
144
+ "model.layers.2.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
145
+ "model.layers.2.post_attention_layernorm.bias": "pytorch_model-00001-of-00003.bin",
146
+ "model.layers.2.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
147
+ "model.layers.2.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
148
+ "model.layers.2.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
149
+ "model.layers.2.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
150
+ "model.layers.2.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
151
+ "model.layers.20.input_layernorm.bias": "pytorch_model-00002-of-00003.bin",
152
+ "model.layers.20.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
153
+ "model.layers.20.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
154
+ "model.layers.20.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
155
+ "model.layers.20.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
156
+ "model.layers.20.post_attention_layernorm.bias": "pytorch_model-00002-of-00003.bin",
157
+ "model.layers.20.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
158
+ "model.layers.20.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
159
+ "model.layers.20.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
160
+ "model.layers.20.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
161
+ "model.layers.20.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
162
+ "model.layers.21.input_layernorm.bias": "pytorch_model-00002-of-00003.bin",
163
+ "model.layers.21.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
164
+ "model.layers.21.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
165
+ "model.layers.21.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
166
+ "model.layers.21.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
167
+ "model.layers.21.post_attention_layernorm.bias": "pytorch_model-00002-of-00003.bin",
168
+ "model.layers.21.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
169
+ "model.layers.21.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
170
+ "model.layers.21.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
171
+ "model.layers.21.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
172
+ "model.layers.21.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
173
+ "model.layers.22.input_layernorm.bias": "pytorch_model-00002-of-00003.bin",
174
+ "model.layers.22.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
175
+ "model.layers.22.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
176
+ "model.layers.22.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
177
+ "model.layers.22.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
178
+ "model.layers.22.post_attention_layernorm.bias": "pytorch_model-00002-of-00003.bin",
179
+ "model.layers.22.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
180
+ "model.layers.22.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
181
+ "model.layers.22.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
182
+ "model.layers.22.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
183
+ "model.layers.22.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
184
+ "model.layers.23.input_layernorm.bias": "pytorch_model-00002-of-00003.bin",
185
+ "model.layers.23.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
186
+ "model.layers.23.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
187
+ "model.layers.23.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
188
+ "model.layers.23.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
189
+ "model.layers.23.post_attention_layernorm.bias": "pytorch_model-00002-of-00003.bin",
190
+ "model.layers.23.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
191
+ "model.layers.23.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
192
+ "model.layers.23.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
193
+ "model.layers.23.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
194
+ "model.layers.23.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
195
+ "model.layers.24.input_layernorm.bias": "pytorch_model-00002-of-00003.bin",
196
+ "model.layers.24.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
197
+ "model.layers.24.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
198
+ "model.layers.24.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
199
+ "model.layers.24.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
200
+ "model.layers.24.post_attention_layernorm.bias": "pytorch_model-00002-of-00003.bin",
201
+ "model.layers.24.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
202
+ "model.layers.24.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
203
+ "model.layers.24.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
204
+ "model.layers.24.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
205
+ "model.layers.24.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
206
+ "model.layers.25.input_layernorm.bias": "pytorch_model-00002-of-00003.bin",
207
+ "model.layers.25.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
208
+ "model.layers.25.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
209
+ "model.layers.25.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
210
+ "model.layers.25.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
211
+ "model.layers.25.post_attention_layernorm.bias": "pytorch_model-00002-of-00003.bin",
212
+ "model.layers.25.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
213
+ "model.layers.25.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
214
+ "model.layers.25.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
215
+ "model.layers.25.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
216
+ "model.layers.25.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
217
+ "model.layers.26.input_layernorm.bias": "pytorch_model-00002-of-00003.bin",
218
+ "model.layers.26.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
219
+ "model.layers.26.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
220
+ "model.layers.26.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
221
+ "model.layers.26.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
222
+ "model.layers.26.post_attention_layernorm.bias": "pytorch_model-00002-of-00003.bin",
223
+ "model.layers.26.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
224
+ "model.layers.26.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
225
+ "model.layers.26.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
226
+ "model.layers.26.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
227
+ "model.layers.26.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
228
+ "model.layers.27.input_layernorm.bias": "pytorch_model-00003-of-00003.bin",
229
+ "model.layers.27.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
230
+ "model.layers.27.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
231
+ "model.layers.27.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
232
+ "model.layers.27.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
233
+ "model.layers.27.post_attention_layernorm.bias": "pytorch_model-00003-of-00003.bin",
234
+ "model.layers.27.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
235
+ "model.layers.27.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
236
+ "model.layers.27.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
237
+ "model.layers.27.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
238
+ "model.layers.27.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
239
+ "model.layers.28.input_layernorm.bias": "pytorch_model-00003-of-00003.bin",
240
+ "model.layers.28.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
241
+ "model.layers.28.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
242
+ "model.layers.28.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
243
+ "model.layers.28.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
244
+ "model.layers.28.post_attention_layernorm.bias": "pytorch_model-00003-of-00003.bin",
245
+ "model.layers.28.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
246
+ "model.layers.28.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
247
+ "model.layers.28.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin",
248
+ "model.layers.28.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
249
+ "model.layers.28.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
250
+ "model.layers.29.input_layernorm.bias": "pytorch_model-00003-of-00003.bin",
251
+ "model.layers.29.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
252
+ "model.layers.29.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
253
+ "model.layers.29.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
254
+ "model.layers.29.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
255
+ "model.layers.29.post_attention_layernorm.bias": "pytorch_model-00003-of-00003.bin",
256
+ "model.layers.29.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
257
+ "model.layers.29.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
258
+ "model.layers.29.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin",
259
+ "model.layers.29.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
260
+ "model.layers.29.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
261
+ "model.layers.3.input_layernorm.bias": "pytorch_model-00001-of-00003.bin",
262
+ "model.layers.3.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
263
+ "model.layers.3.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
264
+ "model.layers.3.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
265
+ "model.layers.3.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
266
+ "model.layers.3.post_attention_layernorm.bias": "pytorch_model-00001-of-00003.bin",
267
+ "model.layers.3.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
268
+ "model.layers.3.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
269
+ "model.layers.3.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
270
+ "model.layers.3.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
271
+ "model.layers.3.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
272
+ "model.layers.30.input_layernorm.bias": "pytorch_model-00003-of-00003.bin",
273
+ "model.layers.30.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
274
+ "model.layers.30.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
275
+ "model.layers.30.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
276
+ "model.layers.30.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
277
+ "model.layers.30.post_attention_layernorm.bias": "pytorch_model-00003-of-00003.bin",
278
+ "model.layers.30.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
279
+ "model.layers.30.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
280
+ "model.layers.30.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin",
281
+ "model.layers.30.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
282
+ "model.layers.30.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
283
+ "model.layers.31.input_layernorm.bias": "pytorch_model-00003-of-00003.bin",
284
+ "model.layers.31.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
285
+ "model.layers.31.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
286
+ "model.layers.31.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
287
+ "model.layers.31.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
288
+ "model.layers.31.post_attention_layernorm.bias": "pytorch_model-00003-of-00003.bin",
289
+ "model.layers.31.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
290
+ "model.layers.31.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
291
+ "model.layers.31.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin",
292
+ "model.layers.31.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
293
+ "model.layers.31.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
294
+ "model.layers.32.input_layernorm.bias": "pytorch_model-00003-of-00003.bin",
295
+ "model.layers.32.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
296
+ "model.layers.32.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
297
+ "model.layers.32.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
298
+ "model.layers.32.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
299
+ "model.layers.32.post_attention_layernorm.bias": "pytorch_model-00003-of-00003.bin",
300
+ "model.layers.32.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
301
+ "model.layers.32.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
302
+ "model.layers.32.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin",
303
+ "model.layers.32.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
304
+ "model.layers.32.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
305
+ "model.layers.33.input_layernorm.bias": "pytorch_model-00003-of-00003.bin",
306
+ "model.layers.33.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
307
+ "model.layers.33.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
308
+ "model.layers.33.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
309
+ "model.layers.33.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
310
+ "model.layers.33.post_attention_layernorm.bias": "pytorch_model-00003-of-00003.bin",
311
+ "model.layers.33.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
312
+ "model.layers.33.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
313
+ "model.layers.33.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin",
314
+ "model.layers.33.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
315
+ "model.layers.33.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
316
+ "model.layers.34.input_layernorm.bias": "pytorch_model-00003-of-00003.bin",
317
+ "model.layers.34.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
318
+ "model.layers.34.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
319
+ "model.layers.34.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
320
+ "model.layers.34.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
321
+ "model.layers.34.post_attention_layernorm.bias": "pytorch_model-00003-of-00003.bin",
322
+ "model.layers.34.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
323
+ "model.layers.34.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
324
+ "model.layers.34.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin",
325
+ "model.layers.34.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
326
+ "model.layers.34.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
327
+ "model.layers.35.input_layernorm.bias": "pytorch_model-00003-of-00003.bin",
328
+ "model.layers.35.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
329
+ "model.layers.35.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
330
+ "model.layers.35.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
331
+ "model.layers.35.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
332
+ "model.layers.35.post_attention_layernorm.bias": "pytorch_model-00003-of-00003.bin",
333
+ "model.layers.35.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
334
+ "model.layers.35.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
335
+ "model.layers.35.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin",
336
+ "model.layers.35.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
337
+ "model.layers.35.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
338
+ "model.layers.36.input_layernorm.bias": "pytorch_model-00003-of-00003.bin",
339
+ "model.layers.36.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
340
+ "model.layers.36.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
341
+ "model.layers.36.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
342
+ "model.layers.36.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
343
+ "model.layers.36.post_attention_layernorm.bias": "pytorch_model-00003-of-00003.bin",
344
+ "model.layers.36.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
345
+ "model.layers.36.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
346
+ "model.layers.36.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin",
347
+ "model.layers.36.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
348
+ "model.layers.36.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
349
+ "model.layers.37.input_layernorm.bias": "pytorch_model-00003-of-00003.bin",
350
+ "model.layers.37.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
351
+ "model.layers.37.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
352
+ "model.layers.37.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
353
+ "model.layers.37.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
354
+ "model.layers.37.post_attention_layernorm.bias": "pytorch_model-00003-of-00003.bin",
355
+ "model.layers.37.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
356
+ "model.layers.37.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
357
+ "model.layers.37.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin",
358
+ "model.layers.37.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
359
+ "model.layers.37.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
360
+ "model.layers.38.input_layernorm.bias": "pytorch_model-00003-of-00003.bin",
361
+ "model.layers.38.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
362
+ "model.layers.38.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
363
+ "model.layers.38.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
364
+ "model.layers.38.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
365
+ "model.layers.38.post_attention_layernorm.bias": "pytorch_model-00003-of-00003.bin",
366
+ "model.layers.38.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
367
+ "model.layers.38.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
368
+ "model.layers.38.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin",
369
+ "model.layers.38.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
370
+ "model.layers.38.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
371
+ "model.layers.39.input_layernorm.bias": "pytorch_model-00003-of-00003.bin",
372
+ "model.layers.39.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
373
+ "model.layers.39.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
374
+ "model.layers.39.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
375
+ "model.layers.39.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
376
+ "model.layers.39.post_attention_layernorm.bias": "pytorch_model-00003-of-00003.bin",
377
+ "model.layers.39.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
378
+ "model.layers.39.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
379
+ "model.layers.39.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin",
380
+ "model.layers.39.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
381
+ "model.layers.39.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
382
+ "model.layers.4.input_layernorm.bias": "pytorch_model-00001-of-00003.bin",
383
+ "model.layers.4.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
384
+ "model.layers.4.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
385
+ "model.layers.4.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
386
+ "model.layers.4.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
387
+ "model.layers.4.post_attention_layernorm.bias": "pytorch_model-00001-of-00003.bin",
388
+ "model.layers.4.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
389
+ "model.layers.4.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
390
+ "model.layers.4.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
391
+ "model.layers.4.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
392
+ "model.layers.4.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
393
+ "model.layers.5.input_layernorm.bias": "pytorch_model-00001-of-00003.bin",
394
+ "model.layers.5.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
395
+ "model.layers.5.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
396
+ "model.layers.5.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
397
+ "model.layers.5.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
398
+ "model.layers.5.post_attention_layernorm.bias": "pytorch_model-00001-of-00003.bin",
399
+ "model.layers.5.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
400
+ "model.layers.5.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
401
+ "model.layers.5.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
402
+ "model.layers.5.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
403
+ "model.layers.5.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
404
+ "model.layers.6.input_layernorm.bias": "pytorch_model-00001-of-00003.bin",
405
+ "model.layers.6.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
406
+ "model.layers.6.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
407
+ "model.layers.6.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
408
+ "model.layers.6.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
409
+ "model.layers.6.post_attention_layernorm.bias": "pytorch_model-00001-of-00003.bin",
410
+ "model.layers.6.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
411
+ "model.layers.6.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
412
+ "model.layers.6.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
413
+ "model.layers.6.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
414
+ "model.layers.6.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
415
+ "model.layers.7.input_layernorm.bias": "pytorch_model-00001-of-00003.bin",
416
+ "model.layers.7.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
417
+ "model.layers.7.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
418
+ "model.layers.7.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
419
+ "model.layers.7.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
420
+ "model.layers.7.post_attention_layernorm.bias": "pytorch_model-00001-of-00003.bin",
421
+ "model.layers.7.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
422
+ "model.layers.7.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
423
+ "model.layers.7.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
424
+ "model.layers.7.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
425
+ "model.layers.7.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
426
+ "model.layers.8.input_layernorm.bias": "pytorch_model-00001-of-00003.bin",
427
+ "model.layers.8.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
428
+ "model.layers.8.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
429
+ "model.layers.8.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
430
+ "model.layers.8.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
431
+ "model.layers.8.post_attention_layernorm.bias": "pytorch_model-00001-of-00003.bin",
432
+ "model.layers.8.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
433
+ "model.layers.8.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
434
+ "model.layers.8.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
435
+ "model.layers.8.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
436
+ "model.layers.8.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
437
+ "model.layers.9.input_layernorm.bias": "pytorch_model-00001-of-00003.bin",
438
+ "model.layers.9.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
439
+ "model.layers.9.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
440
+ "model.layers.9.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
441
+ "model.layers.9.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
442
+ "model.layers.9.post_attention_layernorm.bias": "pytorch_model-00001-of-00003.bin",
443
+ "model.layers.9.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
444
+ "model.layers.9.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
445
+ "model.layers.9.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
446
+ "model.layers.9.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
447
+ "model.layers.9.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
448
+ "model.norm.bias": "pytorch_model-00003-of-00003.bin",
449
+ "model.norm.weight": "pytorch_model-00003-of-00003.bin"
450
+ }
451
+ }
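
A short sketch (not shipped in this repo) of how a loader can consume the index above: invert the weight map so each shard file is read only once, then assemble the full state dict. Paths are assumed relative to a local checkout with the LFS files pulled:

```python
import json
from collections import defaultdict

import torch

with open("pytorch_model.bin.index.json") as f:
    index = json.load(f)

# Invert the weight_map: shard file -> list of tensor names stored in it.
shard_to_keys = defaultdict(list)
for name, shard in index["weight_map"].items():
    shard_to_keys[shard].append(name)

state_dict = {}
for shard, keys in shard_to_keys.items():
    part = torch.load(shard, map_location="cpu")  # one read per shard
    state_dict.update({k: part[k] for k in keys})

assert len(state_dict) == len(index["weight_map"])  # 444 tensors total
```
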
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": true
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": true
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": true
+  },
+  "pad_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": true
+  }
+}
tokenization_orion.py ADDED
@@ -0,0 +1,258 @@
+# Copyright (c) 2024, OrionStar Inc. All rights reserved.
+
+import os
+from shutil import copyfile
+from typing import Any, Dict, List, Optional, Tuple
+
+import sentencepiece as spm
+from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
+from transformers.utils import logging
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+    "vocab_file": {},
+    "tokenizer_file": {},
+}
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {}
+
+
+class OrionTokenizer(PreTrainedTokenizer):
+    """
+    Construct an Orion tokenizer. Based on byte-level Byte-Pair-Encoding.
+
+    Args:
+        vocab_file (`str`):
+            Path to the vocabulary file.
+    """
+
+    vocab_files_names = VOCAB_FILES_NAMES
+    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+    model_input_names = ["input_ids", "attention_mask"]
+
+    def __init__(
+        self,
+        vocab_file,
+        unk_token="<unk>",
+        bos_token="<s>",
+        eos_token="</s>",
+        pad_token=None,
+        sp_model_kwargs: Optional[Dict[str, Any]] = None,
+        add_bos_token=True,
+        add_eos_token=False,
+        clean_up_tokenization_spaces=False,
+        **kwargs,
+    ):
+        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+        bos_token = (
+            AddedToken(bos_token, lstrip=False, rstrip=False)
+            if isinstance(bos_token, str)
+            else bos_token
+        )
+        eos_token = (
+            AddedToken(eos_token, lstrip=False, rstrip=False)
+            if isinstance(eos_token, str)
+            else eos_token
+        )
+        unk_token = (
+            AddedToken(unk_token, lstrip=False, rstrip=False)
+            if isinstance(unk_token, str)
+            else unk_token
+        )
+        pad_token = (
+            AddedToken(pad_token, lstrip=False, rstrip=False)
+            if isinstance(pad_token, str)
+            else pad_token
+        )
+        self.vocab_file = vocab_file
+        self.add_bos_token = add_bos_token
+        self.add_eos_token = add_eos_token
+        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+        self.sp_model.Load(vocab_file)
+        super().__init__(
+            bos_token=bos_token,
+            eos_token=eos_token,
+            unk_token=unk_token,
+            pad_token=pad_token,
+            add_bos_token=add_bos_token,
+            add_eos_token=add_eos_token,
+            sp_model_kwargs=self.sp_model_kwargs,
+            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+            **kwargs,
+        )
+
+    def __getstate__(self):
+        # The SentencePiece processor is not picklable; drop it and re-load in __setstate__.
+        state = self.__dict__.copy()
+        state["sp_model"] = None
+        return state
+
+    def __setstate__(self, d):
+        self.__dict__ = d
+        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+        self.sp_model.Load(self.vocab_file)
+
+    @property
+    def vocab_size(self):
+        """Returns vocab size"""
+        return self.sp_model.get_piece_size()
+
+    def get_vocab(self):
+        """Returns vocab as a dict"""
+        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+        vocab.update(self.added_tokens_encoder)
+        return vocab
+
+    def _tokenize(self, text):
+        """Returns a tokenized string."""
+        return self.sp_model.encode(text, out_type=str)
+
+    def _convert_token_to_id(self, token):
+        """Converts a token (str) to an id using the vocab."""
+        return self.sp_model.piece_to_id(token)
+
+    def _convert_id_to_token(self, index):
+        """Converts an index (integer) to a token (str) using the vocab."""
+        token = self.sp_model.IdToPiece(index)
+        return token
+
+    def convert_tokens_to_string(self, tokens):
+        """Converts a sequence of tokens (string) into a single string."""
+        current_sub_tokens = []
+        out_string = ""
+        prev_is_special = False
+        for i, token in enumerate(tokens):
+            # make sure that special tokens are not decoded using the sentencepiece model
+            if token in self.all_special_tokens:
+                if not prev_is_special and i != 0:
+                    out_string += " "
+                out_string += self.sp_model.decode(current_sub_tokens) + token
+                prev_is_special = True
+                current_sub_tokens = []
+            else:
+                current_sub_tokens.append(token)
+                prev_is_special = False
+        out_string += self.sp_model.decode(current_sub_tokens)
+        return out_string
+
+    def save_vocabulary(
+        self, save_directory, filename_prefix: Optional[str] = None
+    ) -> Tuple[str]:
+        """
+        Save the vocabulary and special tokens file to a directory.
+
+        Args:
+            save_directory (`str`):
+                The directory in which to save the vocabulary.
+
+        Returns:
+            `Tuple(str)`: Paths to the files saved.
+        """
+        if not os.path.isdir(save_directory):
+            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+            return
+        out_vocab_file = os.path.join(
+            save_directory,
+            (filename_prefix + "-" if filename_prefix else "")
+            + VOCAB_FILES_NAMES["vocab_file"],
+        )
+
+        if os.path.abspath(self.vocab_file) != os.path.abspath(
+            out_vocab_file
+        ) and os.path.isfile(self.vocab_file):
+            copyfile(self.vocab_file, out_vocab_file)
+        elif not os.path.isfile(self.vocab_file):
+            with open(out_vocab_file, "wb") as fi:
+                content_spiece_model = self.sp_model.serialized_model_proto()
+                fi.write(content_spiece_model)
+
+        return (out_vocab_file,)
+
+    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+        eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+        output = bos_token_id + token_ids_0 + eos_token_id
+
+        if token_ids_1 is not None:
+            output = output + bos_token_id + token_ids_1 + eos_token_id
+
+        return output
+
+    def get_special_tokens_mask(
+        self,
+        token_ids_0: List[int],
+        token_ids_1: Optional[List[int]] = None,
+        already_has_special_tokens: bool = False,
+    ) -> List[int]:
+        """
+        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+        special tokens using the tokenizer `prepare_for_model` method.
+
+        Args:
+            token_ids_0 (`List[int]`):
+                List of IDs.
+            token_ids_1 (`List[int]`, *optional*):
+                Optional second list of IDs for sequence pairs.
+            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+                Whether or not the token list is already formatted with special tokens for the model.
+
+        Returns:
+            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+        """
+        if already_has_special_tokens:
+            return super().get_special_tokens_mask(
+                token_ids_0=token_ids_0,
+                token_ids_1=token_ids_1,
+                already_has_special_tokens=True,
+            )
+
+        bos_token_id = [1] if self.add_bos_token else []
+        eos_token_id = [1] if self.add_eos_token else []
+
+        if token_ids_1 is None:
+            return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
+        return (
+            bos_token_id
+            + ([0] * len(token_ids_0))
+            + eos_token_id
+            + bos_token_id
+            + ([0] * len(token_ids_1))
+            + eos_token_id
+        )
+
+    def create_token_type_ids_from_sequences(
+        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+    ) -> List[int]:
+        """
+        Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
+        sequence pair mask has the following format:
+
+        ```
+        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+        | first sequence    | second sequence |
+        ```
+
+        If token_ids_1 is None, only returns the first portion of the mask (0s).
+
+        Args:
+            token_ids_0 (`List[int]`):
+                List of ids.
+            token_ids_1 (`List[int]`, *optional*):
+                Optional second list of IDs for sequence pairs.
+
+        Returns:
+            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+        """
+        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+        eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+        output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)
+
+        if token_ids_1 is not None:
+            output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)
+
+        return output
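
A hedged usage sketch for the class above, assuming a local copy of this repo's tokenizer.model. Constructed directly (i.e. without tokenizer_config.json, which overrides `add_bos_token` to false), the defaults `add_bos_token=True` / `add_eos_token=False` make `encode()` prepend `<s>` and append nothing, per `build_inputs_with_special_tokens`:

```python
from tokenization_orion import OrionTokenizer

tok = OrionTokenizer("tokenizer.model")  # local file from this repo
ids = tok.encode("hello world")

assert ids[0] == tok.bos_token_id  # <s> prepended by add_bos_token=True
print(tok.decode(ids))             # round-trips back to the input text
```
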
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ded43118b7418f56db97a4eed08a5c265c03120158229ddd4fbcc9658241d5f0
+size 1520600
tokenizer_config.json ADDED
@@ -0,0 +1,46 @@
+{
+  "add_bos_token": false,
+  "add_eos_token": false,
+  "auto_map": {
+    "AutoTokenizer": [
+      "tokenization_orion.OrionTokenizer",
+      null
+    ]
+  },
+  "bos_token": {
+    "__type": "AddedToken",
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": true
+  },
+  "clean_up_tokenization_spaces": false,
+  "eos_token": {
+    "__type": "AddedToken",
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": true
+  },
+  "model_max_length": 4096,
+  "pad_token": {
+    "__type": "AddedToken",
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": true
+  },
+  "sp_model_kwargs": {},
+  "tokenizer_class": "OrionTokenizer",
+  "unk_token": {
+    "__type": "AddedToken",
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": true
+  }
+}
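
Finally, a hedged end-to-end sketch of how the files in this commit fit together: the `auto_map` entries in config.json and tokenizer_config.json route the `Auto*` classes to the custom Orion code when `trust_remote_code=True`. The repo id below is illustrative, not taken from this commit:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "OrionStarAI/Orion-14B"  # illustrative repo id
tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    repo,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype" in config.json
    trust_remote_code=True,
)

inputs = tokenizer("Hello", return_tensors="pt")
out = model.generate(**inputs, max_new_tokens=16)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```
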