Devops-hestabit committed
Commit e2a25ac
1 Parent(s): 9585e18

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,11 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ pytorch_model-00002-of-00007.bin filter=lfs diff=lfs merge=lfs -text
+ pytorch_model-00003-of-00007.bin filter=lfs diff=lfs merge=lfs -text
+ pytorch_model-00004-of-00007.bin filter=lfs diff=lfs merge=lfs -text
+ pytorch_model-00005-of-00007.bin filter=lfs diff=lfs merge=lfs -text
+ pytorch_model-00006-of-00007.bin filter=lfs diff=lfs merge=lfs -text
+ pytorch_model-00007-of-00007.bin filter=lfs diff=lfs merge=lfs -text
+ pytorch_model-00001-of-00007.bin filter=lfs diff=lfs merge=lfs -text
+ tokenizer.model filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,278 @@
- ---
- license: mit
- ---
+ ---
+ language:
+ - en
+ library_name: transformers
+ pipeline_tag: text-generation
+ datasets:
+ - jondurbin/airoboros-2.2
+ - Open-Orca/OpenOrca
+ - garage-bAInd/Open-Platypus
+ - WizardLM/WizardLM_evol_instruct_V2_196k
+ tags:
+ - llama-2
+ - code
+ license: llama2
+ model-index:
+ - name: SpeechlessCoder
+   results:
+   - task:
+       type: text-generation
+     dataset:
+       type: openai_humaneval
+       name: HumanEval
+     metrics:
+     - name: pass@1
+       type: pass@1
+       value: 75.61
+       verified: false
+ ---
+
+ # speechless-codellama-34b-v2.0
+
+ * [AWQ model(s) for GPU inference.](https://huggingface.co/TheBloke/speechless-codellama-34b-v2.0-AWQ)
+ * [GPTQ models for GPU inference, with multiple quantisation parameter options.](https://huggingface.co/TheBloke/speechless-codellama-34b-v2.0-GPTQ)
+ * [2, 3, 4, 5, 6 and 8-bit GGUF models for CPU+GPU inference](https://huggingface.co/TheBloke/speechless-codellama-34b-v2.0-GGUF)
+
+ Code: https://github.com/uukuguy/speechless
+
+ The following datasets were used to fine-tune codellama/CodeLlama-34B in order to improve the model's reasoning and planning capabilities, 153,013 samples in total:
+
+ - jondurbin/airoboros-2.2: filtered to the categories related to coding, reasoning and planning; 23,462 samples.
+ - Open-Orca/OpenOrca: filtered to the 'cot' category of the 1M GPT4 dataset; 74,440 samples.
+ - garage-bAInd/Open-Platypus: used in full (100%); 24,926 samples.
+ - WizardLM/WizardLM_evol_instruct_V2_196k: the coding conversation part; 30,185 samples.
+
+ ## How to Prompt the Model
+
+ This model accepts the Alpaca instruction format. For example:
+
+ ```
+ You are an intelligent programming assistant.
+
+ ### Instruction:
+ Implement a linked list in C++
+
+ ### Response:
+ ```
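+
+ A minimal sketch of querying the model with this template through `transformers` (the sampling settings here are illustrative assumptions, not taken from the card):
+
+ ```python
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ model_id = "uukuguy/speechless-codellama-34b-v2.0"
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+ model = AutoModelForCausalLM.from_pretrained(
+     model_id, torch_dtype=torch.float16, device_map="auto"
+ )
+
+ # Build the Alpaca-style prompt shown above.
+ prompt = (
+     "You are an intelligent programming assistant.\n\n"
+     "### Instruction:\nImplement a linked list in C++\n\n"
+     "### Response:\n"
+ )
+ inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+ outputs = model.generate(**inputs, max_new_tokens=512, do_sample=True, temperature=0.1)
+ # Decode only the newly generated tokens.
+ print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
+ ```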
+
+ ## HumanEval
+
+ | human-eval | pass@1 |
+ | --- | --- |
+ | humaneval-python | 75.61 |
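+
+ The pass@1 number is the standard unbiased estimator from the HumanEval paper; for reference, a minimal sketch of the general pass@k computation (not part of the original card):
+
+ ```python
+ import numpy as np
+
+ def pass_at_k(n: int, c: int, k: int) -> float:
+     """Unbiased pass@k: n = samples generated per task, c = correct samples."""
+     if n - c < k:
+         return 1.0
+     return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))
+ ```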
+
+ [Big Code Models Leaderboard](https://huggingface.co/spaces/bigcode/bigcode-models-leaderboard)
+
+ | Models | pass@1 |
+ |------ | ------ |
+ | Phind-CodeLlama-34B-v2 | 71.95 |
+ | WizardCoder-Python-34B-V1.0 | 70.73 |
+ | Phind-CodeLlama-34B-Python-v1 | 70.22 |
+ | Phind-CodeLlama-34B-v1 | 65.85 |
+ | WizardCoder-Python-13B-V1.0 | 62.19 |
+ | WizardCoder-15B-V1.0 | 58.12 |
+ | CodeLlama-34B-Python | 53.29 |
+ | CodeLlama-34B-Instruct | 50.79 |
+ | CodeLlama-13B-Instruct | 50.6 |
+ | CodeLlama-34B | 45.11 |
+ | CodeLlama-13B-Python | 42.89 |
+ | CodeLlama-13B | 35.07 |
+
+ ## NL2SQL
+
+ SQL-EVAL: 125/175 (71.43%)
+
+ Average rate of exact match: 67.43%
+
+ Average correct rate: 71.43%
+
+ For comparison:
+ - GPT4: 130/175 (74.29%)
+ - GPT3-Turbo-0613: 105/175 (60.00%)
+
+ ## lm-evaluation-harness
+
+ [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
+
+ | Metric | Value |
+ | --- | --- |
+ | ARC | 54.35 |
+ | HellaSwag | 75.65 |
+ | MMLU | 54.67 |
+ | TruthfulQA | 45.21 |
+ | Average | 57.47 |
+
+ Fine-tuning environment: H800-80G x 2, with transformers 4.33.0, flash-attn 2.1.0, bitsandbytes 0.41.1 and peft 0.5.0.
+
+ ## Training Arguments
+
+ | Argument | Value |
+ |------ | ------ |
+ | lr | 2e-4 |
+ | lr_scheduler_type | cosine |
+ | weight_decay | 0.0 |
+ | optim | paged_adamw_8bit |
+ | flash_attention | True |
+ | rerope | False |
+ | max_new_tokens | 8192 |
+ | num_train_epochs | 3 |
+ | bits | 4 |
+ | lora_r | 64 |
+ | lora_alpha | 16 |
+ | lora_dropout | 0.05 |
+ | double_quant | True |
+ | quant_type | nf4 |
+ | dataset_format | airoboros |
+ | mini_batch_size | 4 |
+ | gradient_accumulation_steps | 16 |
+ | bf16 | True |
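+
+ The bits / quant_type / double_quant / lora_* rows describe a 4-bit QLoRA setup; a minimal sketch of the corresponding `transformers` + `peft` configuration (the LoRA target modules are an assumption, they are not listed in the card):
+
+ ```python
+ import torch
+ from transformers import BitsAndBytesConfig
+ from peft import LoraConfig
+
+ # 4-bit NF4 quantization with double quantization (bits=4, quant_type=nf4, double_quant=True, bf16=True).
+ bnb_config = BitsAndBytesConfig(
+     load_in_4bit=True,
+     bnb_4bit_quant_type="nf4",
+     bnb_4bit_use_double_quant=True,
+     bnb_4bit_compute_dtype=torch.bfloat16,
+ )
+
+ # LoRA adapter matching lora_r=64, lora_alpha=16, lora_dropout=0.05.
+ lora_config = LoraConfig(
+     r=64,
+     lora_alpha=16,
+     lora_dropout=0.05,
+     bias="none",
+     task_type="CAUSAL_LM",
+     target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],  # assumption
+ )
+ ```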
+
+ | Metric | Value |
+ |------ | ------ |
+ | epoch | 3.0 |
+ | train_loss | 0.4261 |
+ | train_runtime | 1 day, 14:42:57.87 |
+ | train_samples_per_second | 3.227 |
+ | train_steps_per_second | 0.025 |
+ | eval_loss | 0.4537 |
+ | eval_runtime | 0:00:36.19 |
+ | eval_samples_per_second | 5.525 |
+ | eval_steps_per_second | 2.763 |
+
+ # **Code Llama**
+
+ Code Llama is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 34 billion parameters. This repository builds on the 34B base version in the Hugging Face Transformers format, which is designed for general code synthesis and understanding. Links to other models can be found in the index at the bottom.
+
+ | | Base Model | Python | Instruct |
+ | --- | --- | --- | --- |
+ | 7B | [codellama/CodeLlama-7b-hf](https://huggingface.co/codellama/CodeLlama-7b-hf) | [codellama/CodeLlama-7b-Python-hf](https://huggingface.co/codellama/CodeLlama-7b-Python-hf) | [codellama/CodeLlama-7b-Instruct-hf](https://huggingface.co/codellama/CodeLlama-7b-Instruct-hf) |
+ | 13B | [codellama/CodeLlama-13b-hf](https://huggingface.co/codellama/CodeLlama-13b-hf) | [codellama/CodeLlama-13b-Python-hf](https://huggingface.co/codellama/CodeLlama-13b-Python-hf) | [codellama/CodeLlama-13b-Instruct-hf](https://huggingface.co/codellama/CodeLlama-13b-Instruct-hf) |
+ | 34B | [codellama/CodeLlama-34b-hf](https://huggingface.co/codellama/CodeLlama-34b-hf) | [codellama/CodeLlama-34b-Python-hf](https://huggingface.co/codellama/CodeLlama-34b-Python-hf) | [codellama/CodeLlama-34b-Instruct-hf](https://huggingface.co/codellama/CodeLlama-34b-Instruct-hf) |
+
+ ## Model Use
+
+ To use this model, please make sure to install transformers from `main` until the next version is released:
+
+ ```bash
+ pip install git+https://github.com/huggingface/transformers.git@main accelerate
+ ```
+
+ Model capabilities:
+
+ - [x] Code completion.
+ - [x] Infilling.
+ - [ ] Instructions / chat.
+ - [ ] Python specialist.
+
+ ```python
+ from transformers import AutoTokenizer
+ import transformers
+ import torch
+
+ model = "codellama/CodeLlama-13b-hf"
+
+ tokenizer = AutoTokenizer.from_pretrained(model)
+ pipeline = transformers.pipeline(
+     "text-generation",
+     model=model,
+     torch_dtype=torch.float16,
+     device_map="auto",
+ )
+
+ sequences = pipeline(
+     'import socket\n\ndef ping_exponential_backoff(host: str):',
+     do_sample=True,
+     top_k=10,
+     temperature=0.1,
+     top_p=0.95,
+     num_return_sequences=1,
+     eos_token_id=tokenizer.eos_token_id,
+     max_length=200,
+ )
+ for seq in sequences:
+     print(f"Result: {seq['generated_text']}")
+ ```
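+
+ For the infilling capability, the upstream codellama checkpoints accept a `<FILL_ME>` sentinel in the prompt (handled by their tokenizer); a hedged sketch against the upstream base model, since the card does not show an infilling example and this fine-tune's infilling behavior is untested:
+
+ ```python
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ base = "codellama/CodeLlama-7b-hf"  # upstream checkpoint used for illustration
+ tokenizer = AutoTokenizer.from_pretrained(base)
+ model = AutoModelForCausalLM.from_pretrained(base, torch_dtype=torch.float16, device_map="auto")
+
+ # <FILL_ME> marks the span the model should fill in between prefix and suffix.
+ prompt = 'def remove_non_ascii(s: str) -> str:\n    """ <FILL_ME>\n    return result'
+ input_ids = tokenizer(prompt, return_tensors="pt")["input_ids"].to(model.device)
+ output = model.generate(input_ids, max_new_tokens=128)
+ filling = tokenizer.batch_decode(output[:, input_ids.shape[1]:], skip_special_tokens=True)[0]
+ print(prompt.replace("<FILL_ME>", filling))
+ ```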
+
+ ## Model Details
+
+ *Note: Use of this model is governed by the Meta license.* Meta developed and publicly released the Code Llama family of large language models (LLMs).
+
+ **Model Developers** Meta
+
+ **Variations** Code Llama comes in three model sizes and three variants:
+
+ * Code Llama: base models designed for general code synthesis and understanding
+ * Code Llama - Python: designed specifically for Python
+ * Code Llama - Instruct: for instruction following and safer deployment
+
+ All variants are available in sizes of 7B, 13B and 34B parameters.
+
+ **This repository contains a fine-tune of the 34B parameter base model.**
+
+ **Input** Models input text only.
+
+ **Output** Models generate text only.
+
+ **Model Architecture** Code Llama is an auto-regressive language model that uses an optimized transformer architecture.
+
+ **Model Dates** Code Llama and its variants were trained between January 2023 and July 2023.
+
+ **Status** This is a static model trained on an offline dataset. Future versions of Code Llama - Instruct will be released as we improve model safety with community feedback.
+
+ **License** A custom commercial license is available at: [https://ai.meta.com/resources/models-and-libraries/llama-downloads/](https://ai.meta.com/resources/models-and-libraries/llama-downloads/)
+
+ **Research Paper** More information can be found in the paper "[Code Llama: Open Foundation Models for Code](https://ai.meta.com/research/publications/code-llama-open-foundation-models-for-code/)" or its [arXiv page](https://arxiv.org/abs/2308.12950).
+
+ ## Intended Use
+
+ **Intended Use Cases** Code Llama and its variants are intended for commercial and research use in English and relevant programming languages. The base model Code Llama can be adapted for a variety of code synthesis and understanding tasks, Code Llama - Python is designed specifically to handle the Python programming language, and Code Llama - Instruct is intended to be safer to use for code assistant and generation applications.
+
+ **Out-of-Scope Uses** Use in any manner that violates applicable laws or regulations (including trade compliance laws). Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Code Llama and its variants.
+
+ ## Hardware and Software
+
+ **Training Factors** We used custom training libraries. The training and fine-tuning of the released models have been performed on Meta’s Research Super Cluster.
+
+ **Carbon Footprint** In aggregate, training all 9 Code Llama models required 400K GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 65.3 tCO2eq, 100% of which were offset by Meta’s sustainability program.
+
+ ## Training Data
+
+ All experiments reported here and the released models have been trained and fine-tuned using the same data as Llama 2, with different weights (see Section 2 and Table 1 in the [research paper](https://ai.meta.com/research/publications/code-llama-open-foundation-models-for-code/) for details).
+
+ ## Evaluation Results
+
+ See evaluations for the main models and detailed ablations in Section 3 and safety evaluations in Section 4 of the research paper.
+
+ ## Ethical Considerations and Limitations
+
+ Code Llama and its variants are a new technology that carries risks with use. Testing conducted to date has been in English and has not covered, nor could it cover, all scenarios. For these reasons, as with all LLMs, Code Llama’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate or objectionable responses to user prompts. Therefore, before deploying any applications of Code Llama, developers should perform safety testing and tuning tailored to their specific applications of the model.
+
+ Please see the Responsible Use Guide available at [https://ai.meta.com/llama/responsible-user-guide](https://ai.meta.com/llama/responsible-user-guide).
+
+ # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
+
+ Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_uukuguy__speechless-codellama-34b-v2.0).
+
+ | Metric | Value |
+ |-----------------------|---------------------------|
+ | Avg. | 50.96 |
+ | ARC (25-shot) | 54.35 |
+ | HellaSwag (10-shot) | 75.65 |
+ | MMLU (5-shot) | 54.67 |
+ | TruthfulQA (0-shot) | 45.21 |
+ | Winogrande (5-shot) | 73.56 |
+ | GSM8K (5-shot) | 11.6 |
+ | DROP (3-shot) | 41.71 |
config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "_name_or_path": "/opt/local/llm_models/huggingface.co/Phind/Phind-CodeLlama-34B-v2",
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 8192,
+   "initializer_range": 0.02,
+   "intermediate_size": 22016,
+   "max_position_embeddings": 16384,
+   "model_type": "llama",
+   "num_attention_heads": 64,
+   "num_hidden_layers": 48,
+   "num_key_value_heads": 8,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "rope_theta": 1000000,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float16",
+   "transformers_version": "4.33.2",
+   "use_cache": true,
+   "vocab_size": 32000
+ }
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "transformers_version": "4.33.2"
+ }
pytorch_model-00001-of-00007.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2b9e36b73469257c3c8903bd1c9701ea9795100ff45de419cbbb0adf0076ee27
+ size 9852637497
pytorch_model-00002-of-00007.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3dea603efe349d7f14547cc1c654b227b16fb33cf8bd019739cfcc32fc8b67ed
+ size 9689093137
pytorch_model-00003-of-00007.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2c54f1451a330b9341def1a6206a9b8407f8b605dfea5a0ebf138409f5fa3f86
+ size 9689093137
pytorch_model-00004-of-00007.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:66ebea7379818ef8115dc348b7ee63da6a8a825ad762b371754e5459324f284f
+ size 9689093137
pytorch_model-00005-of-00007.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d29774ce11f8b4e20103de92c9ebb456eb809eae5cb87d9e0d6540ca6759bfb4
+ size 9689093137
pytorch_model-00006-of-00007.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:37deb74bf6c343959079ecec7dd3c75e483d7b3b8dce462a7a85a929a1f6e739
+ size 9689093137
pytorch_model-00007-of-00007.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8132abe490512be0272dd63aa0d4b7879e26194ac8f629c8e582e5ea40163179
+ size 9189985945
pytorch_model.bin.index.json ADDED
@@ -0,0 +1,442 @@
+ {
+   "metadata": {
+     "total_size": 67487940608
+   },
+   "weight_map": {
+     "lm_head.weight": "pytorch_model-00007-of-00007.bin",
+     "model.embed_tokens.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.0.input_layernorm.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.0.mlp.down_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.0.mlp.gate_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.0.mlp.up_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.0.post_attention_layernorm.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.0.self_attn.k_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.0.self_attn.o_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.0.self_attn.q_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.0.self_attn.v_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.1.input_layernorm.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.1.mlp.down_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.1.mlp.gate_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.1.mlp.up_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.1.post_attention_layernorm.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.1.self_attn.k_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.1.self_attn.o_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.1.self_attn.q_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.1.self_attn.v_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.10.input_layernorm.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.10.mlp.down_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.10.mlp.gate_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.10.mlp.up_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.10.post_attention_layernorm.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.10.self_attn.k_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.10.self_attn.o_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.10.self_attn.q_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.10.self_attn.v_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.11.input_layernorm.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.11.mlp.down_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.11.mlp.gate_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.11.mlp.up_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.11.post_attention_layernorm.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.11.self_attn.k_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.11.self_attn.o_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.11.self_attn.q_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.11.self_attn.v_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.12.input_layernorm.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.12.mlp.down_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.12.mlp.gate_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.12.mlp.up_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.12.post_attention_layernorm.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.12.self_attn.k_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.12.self_attn.o_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.12.self_attn.q_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.12.self_attn.v_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.13.input_layernorm.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.13.mlp.down_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.13.mlp.gate_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.13.mlp.up_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.13.post_attention_layernorm.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.13.self_attn.k_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.13.self_attn.o_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.13.self_attn.q_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.13.self_attn.v_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.14.input_layernorm.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.14.mlp.down_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.14.mlp.gate_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.14.mlp.up_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.14.post_attention_layernorm.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.14.self_attn.k_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.14.self_attn.o_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.14.self_attn.q_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.14.self_attn.v_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.15.input_layernorm.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.15.mlp.down_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.15.mlp.gate_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.15.mlp.up_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.15.post_attention_layernorm.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.15.self_attn.k_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.15.self_attn.o_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.15.self_attn.q_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.15.self_attn.v_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.16.input_layernorm.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.16.mlp.down_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.16.mlp.gate_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.16.mlp.up_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.16.post_attention_layernorm.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.16.self_attn.k_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.16.self_attn.o_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.16.self_attn.q_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.16.self_attn.v_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.17.input_layernorm.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.17.mlp.down_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.17.mlp.gate_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.17.mlp.up_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.17.post_attention_layernorm.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.17.self_attn.k_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.17.self_attn.o_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.17.self_attn.q_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.17.self_attn.v_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.18.input_layernorm.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.18.mlp.down_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.18.mlp.gate_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.18.mlp.up_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.18.post_attention_layernorm.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.18.self_attn.k_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.18.self_attn.o_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.18.self_attn.q_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.18.self_attn.v_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.19.input_layernorm.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.19.mlp.down_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.19.mlp.gate_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.19.mlp.up_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.19.post_attention_layernorm.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.19.self_attn.k_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.19.self_attn.o_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.19.self_attn.q_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.19.self_attn.v_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.2.input_layernorm.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.2.mlp.down_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.2.mlp.gate_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.2.mlp.up_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.2.post_attention_layernorm.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.2.self_attn.k_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.2.self_attn.o_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.2.self_attn.q_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.2.self_attn.v_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.20.input_layernorm.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.20.mlp.down_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.20.mlp.gate_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.20.mlp.up_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.20.post_attention_layernorm.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.20.self_attn.k_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.20.self_attn.o_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.20.self_attn.q_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.20.self_attn.v_proj.weight": "pytorch_model-00003-of-00007.bin",
+     "model.layers.21.input_layernorm.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.21.mlp.down_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.21.mlp.gate_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.21.mlp.up_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.21.post_attention_layernorm.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.21.self_attn.k_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.21.self_attn.o_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.21.self_attn.q_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.21.self_attn.v_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.22.input_layernorm.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.22.mlp.down_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.22.mlp.gate_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.22.mlp.up_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.22.post_attention_layernorm.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.22.self_attn.k_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.22.self_attn.o_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.22.self_attn.q_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.22.self_attn.v_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.23.input_layernorm.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.23.mlp.down_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.23.mlp.gate_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.23.mlp.up_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.23.post_attention_layernorm.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.23.self_attn.k_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.23.self_attn.o_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.23.self_attn.q_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.23.self_attn.v_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.24.input_layernorm.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.24.mlp.down_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.24.mlp.gate_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.24.mlp.up_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.24.post_attention_layernorm.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.24.self_attn.k_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.24.self_attn.o_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.24.self_attn.q_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.24.self_attn.v_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.25.input_layernorm.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.25.mlp.down_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.25.mlp.gate_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.25.mlp.up_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.25.post_attention_layernorm.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.25.self_attn.k_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.25.self_attn.o_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.25.self_attn.q_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.25.self_attn.v_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.26.input_layernorm.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.26.mlp.down_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.26.mlp.gate_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.26.mlp.up_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.26.post_attention_layernorm.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.26.self_attn.k_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.26.self_attn.o_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.26.self_attn.q_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.26.self_attn.v_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.27.input_layernorm.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.27.mlp.down_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.27.mlp.gate_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.27.mlp.up_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.27.post_attention_layernorm.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.27.self_attn.k_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.27.self_attn.o_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.27.self_attn.q_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.27.self_attn.v_proj.weight": "pytorch_model-00004-of-00007.bin",
+     "model.layers.28.input_layernorm.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.28.mlp.down_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.28.mlp.gate_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.28.mlp.up_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.28.post_attention_layernorm.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.28.self_attn.k_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.28.self_attn.o_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.28.self_attn.q_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.28.self_attn.v_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.29.input_layernorm.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.29.mlp.down_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.29.mlp.gate_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.29.mlp.up_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.29.post_attention_layernorm.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.29.self_attn.k_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.29.self_attn.o_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.29.self_attn.q_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.29.self_attn.v_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.3.input_layernorm.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.3.mlp.down_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.3.mlp.gate_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.3.mlp.up_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.3.post_attention_layernorm.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.3.self_attn.k_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.3.self_attn.o_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.3.self_attn.q_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.3.self_attn.v_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.30.input_layernorm.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.30.mlp.down_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.30.mlp.gate_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.30.mlp.up_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.30.post_attention_layernorm.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.30.self_attn.k_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.30.self_attn.o_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.30.self_attn.q_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.30.self_attn.v_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.31.input_layernorm.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.31.mlp.down_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.31.mlp.gate_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.31.mlp.up_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.31.post_attention_layernorm.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.31.self_attn.k_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.31.self_attn.o_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.31.self_attn.q_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.31.self_attn.v_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.32.input_layernorm.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.32.mlp.down_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.32.mlp.gate_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.32.mlp.up_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.32.post_attention_layernorm.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.32.self_attn.k_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.32.self_attn.o_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.32.self_attn.q_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.32.self_attn.v_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.33.input_layernorm.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.33.mlp.down_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.33.mlp.gate_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.33.mlp.up_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.33.post_attention_layernorm.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.33.self_attn.k_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.33.self_attn.o_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.33.self_attn.q_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.33.self_attn.v_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.34.input_layernorm.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.34.mlp.down_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.34.mlp.gate_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.34.mlp.up_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.34.post_attention_layernorm.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.34.self_attn.k_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.34.self_attn.o_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.34.self_attn.q_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.34.self_attn.v_proj.weight": "pytorch_model-00005-of-00007.bin",
+     "model.layers.35.input_layernorm.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.35.mlp.down_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.35.mlp.gate_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.35.mlp.up_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.35.post_attention_layernorm.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.35.self_attn.k_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.35.self_attn.o_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.35.self_attn.q_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.35.self_attn.v_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.36.input_layernorm.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.36.mlp.down_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.36.mlp.gate_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.36.mlp.up_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.36.post_attention_layernorm.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.36.self_attn.k_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.36.self_attn.o_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.36.self_attn.q_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.36.self_attn.v_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.37.input_layernorm.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.37.mlp.down_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.37.mlp.gate_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.37.mlp.up_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.37.post_attention_layernorm.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.37.self_attn.k_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.37.self_attn.o_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.37.self_attn.q_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.37.self_attn.v_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.38.input_layernorm.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.38.mlp.down_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.38.mlp.gate_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.38.mlp.up_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.38.post_attention_layernorm.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.38.self_attn.k_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.38.self_attn.o_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.38.self_attn.q_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.38.self_attn.v_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.39.input_layernorm.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.39.mlp.down_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.39.mlp.gate_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.39.mlp.up_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.39.post_attention_layernorm.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.39.self_attn.k_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.39.self_attn.o_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.39.self_attn.q_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.39.self_attn.v_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.4.input_layernorm.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.4.mlp.down_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.4.mlp.gate_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.4.mlp.up_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.4.post_attention_layernorm.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.4.self_attn.k_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.4.self_attn.o_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.4.self_attn.q_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.4.self_attn.v_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.40.input_layernorm.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.40.mlp.down_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.40.mlp.gate_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.40.mlp.up_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.40.post_attention_layernorm.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.40.self_attn.k_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.40.self_attn.o_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.40.self_attn.q_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.40.self_attn.v_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.41.input_layernorm.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.41.mlp.down_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.41.mlp.gate_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.41.mlp.up_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.41.post_attention_layernorm.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.41.self_attn.k_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.41.self_attn.o_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.41.self_attn.q_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.41.self_attn.v_proj.weight": "pytorch_model-00006-of-00007.bin",
+     "model.layers.42.input_layernorm.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.42.mlp.down_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.42.mlp.gate_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.42.mlp.up_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.42.post_attention_layernorm.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.42.self_attn.k_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.42.self_attn.o_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.42.self_attn.q_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.42.self_attn.v_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.43.input_layernorm.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.43.mlp.down_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.43.mlp.gate_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.43.mlp.up_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.43.post_attention_layernorm.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.43.self_attn.k_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.43.self_attn.o_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.43.self_attn.q_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.43.self_attn.v_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.44.input_layernorm.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.44.mlp.down_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.44.mlp.gate_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.44.mlp.up_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.44.post_attention_layernorm.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.44.self_attn.k_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.44.self_attn.o_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.44.self_attn.q_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.44.self_attn.v_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.45.input_layernorm.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.45.mlp.down_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.45.mlp.gate_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.45.mlp.up_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.45.post_attention_layernorm.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.45.self_attn.k_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.45.self_attn.o_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.45.self_attn.q_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.45.self_attn.v_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.46.input_layernorm.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.46.mlp.down_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.46.mlp.gate_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.46.mlp.up_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.46.post_attention_layernorm.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.46.self_attn.k_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.46.self_attn.o_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.46.self_attn.q_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.46.self_attn.v_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.47.input_layernorm.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.47.mlp.down_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.47.mlp.gate_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.47.mlp.up_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.47.post_attention_layernorm.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.47.self_attn.k_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.47.self_attn.o_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.47.self_attn.q_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.47.self_attn.v_proj.weight": "pytorch_model-00007-of-00007.bin",
+     "model.layers.5.input_layernorm.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.5.mlp.down_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.5.mlp.gate_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.5.mlp.up_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.5.post_attention_layernorm.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.5.self_attn.k_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.5.self_attn.o_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.5.self_attn.q_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.5.self_attn.v_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.6.input_layernorm.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.6.mlp.down_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.6.mlp.gate_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.6.mlp.up_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.6.post_attention_layernorm.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.6.self_attn.k_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.6.self_attn.o_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.6.self_attn.q_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.6.self_attn.v_proj.weight": "pytorch_model-00001-of-00007.bin",
+     "model.layers.7.input_layernorm.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.7.mlp.down_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.7.mlp.gate_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.7.mlp.up_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.7.post_attention_layernorm.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.7.self_attn.k_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.7.self_attn.o_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.7.self_attn.q_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.7.self_attn.v_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.8.input_layernorm.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.8.mlp.down_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.8.mlp.gate_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.8.mlp.up_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.8.post_attention_layernorm.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.8.self_attn.k_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.8.self_attn.o_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.8.self_attn.q_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.8.self_attn.v_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.9.input_layernorm.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.9.mlp.down_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.9.mlp.gate_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.9.mlp.up_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.9.post_attention_layernorm.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.9.self_attn.k_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.9.self_attn.o_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.9.self_attn.q_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.layers.9.self_attn.v_proj.weight": "pytorch_model-00002-of-00007.bin",
+     "model.norm.weight": "pytorch_model-00007-of-00007.bin"
+   }
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "</s>",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "clean_up_tokenization_spaces": false,
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "legacy": null,
+   "model_max_length": 4096,
+   "pad_token": null,
+   "padding_side": "right",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "use_default_system_prompt": true
+ }