Gokuldaskumar and JustinLin610 committed on
Commit
27ca79f
0 Parent(s):

Duplicate from Qwen/Qwen1.5-72B-Chat


Co-authored-by: Junyang Lin <JustinLin610@users.noreply.huggingface.co>

Files changed (48)
  1. .gitattributes +35 -0
  2. LICENSE +53 -0
  3. README.md +97 -0
  4. config.json +27 -0
  5. generation_config.json +13 -0
  6. merges.txt +0 -0
  7. model-00001-of-00038.safetensors +3 -0
  8. model-00002-of-00038.safetensors +3 -0
  9. model-00003-of-00038.safetensors +3 -0
  10. model-00004-of-00038.safetensors +3 -0
  11. model-00005-of-00038.safetensors +3 -0
  12. model-00006-of-00038.safetensors +3 -0
  13. model-00007-of-00038.safetensors +3 -0
  14. model-00008-of-00038.safetensors +3 -0
  15. model-00009-of-00038.safetensors +3 -0
  16. model-00010-of-00038.safetensors +3 -0
  17. model-00011-of-00038.safetensors +3 -0
  18. model-00012-of-00038.safetensors +3 -0
  19. model-00013-of-00038.safetensors +3 -0
  20. model-00014-of-00038.safetensors +3 -0
  21. model-00015-of-00038.safetensors +3 -0
  22. model-00016-of-00038.safetensors +3 -0
  23. model-00017-of-00038.safetensors +3 -0
  24. model-00018-of-00038.safetensors +3 -0
  25. model-00019-of-00038.safetensors +3 -0
  26. model-00020-of-00038.safetensors +3 -0
  27. model-00021-of-00038.safetensors +3 -0
  28. model-00022-of-00038.safetensors +3 -0
  29. model-00023-of-00038.safetensors +3 -0
  30. model-00024-of-00038.safetensors +3 -0
  31. model-00025-of-00038.safetensors +3 -0
  32. model-00026-of-00038.safetensors +3 -0
  33. model-00027-of-00038.safetensors +3 -0
  34. model-00028-of-00038.safetensors +3 -0
  35. model-00029-of-00038.safetensors +3 -0
  36. model-00030-of-00038.safetensors +3 -0
  37. model-00031-of-00038.safetensors +3 -0
  38. model-00032-of-00038.safetensors +3 -0
  39. model-00033-of-00038.safetensors +3 -0
  40. model-00034-of-00038.safetensors +3 -0
  41. model-00035-of-00038.safetensors +3 -0
  42. model-00036-of-00038.safetensors +3 -0
  43. model-00037-of-00038.safetensors +3 -0
  44. model-00038-of-00038.safetensors +3 -0
  45. model.safetensors.index.json +970 -0
  46. tokenizer.json +0 -0
  47. tokenizer_config.json +40 -0
  48. vocab.json +0 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
LICENSE ADDED
@@ -0,0 +1,53 @@
+ Tongyi Qianwen LICENSE AGREEMENT
+
+ Tongyi Qianwen Release Date: August 3, 2023
+
+ By clicking to agree or by using or distributing any portion or element of the Tongyi Qianwen Materials, you will be deemed to have recognized and accepted the content of this Agreement, which is effective immediately.
+
+ 1. Definitions
+ a. This Tongyi Qianwen LICENSE AGREEMENT (this "Agreement") shall mean the terms and conditions for use, reproduction, distribution and modification of the Materials as defined by this Agreement.
+ b. "We"(or "Us") shall mean Alibaba Cloud.
+ c. "You" (or "Your") shall mean a natural person or legal entity exercising the rights granted by this Agreement and/or using the Materials for any purpose and in any field of use.
+ d. "Third Parties" shall mean individuals or legal entities that are not under common control with Us or You.
+ e. "Tongyi Qianwen" shall mean the large language models (including Qwen model and Qwen-Chat model), and software and algorithms, consisting of trained model weights, parameters (including optimizer states), machine-learning model code, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing distributed by Us.
+ f. "Materials" shall mean, collectively, Alibaba Cloud's proprietary Tongyi Qianwen and Documentation (and any portion thereof) made available under this Agreement.
+ g. "Source" form shall mean the preferred form for making modifications, including but not limited to model source code, documentation source, and configuration files.
+ h. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ 2. Grant of Rights
+ You are granted a non-exclusive, worldwide, non-transferable and royalty-free limited license under Alibaba Cloud's intellectual property or other rights owned by Us embodied in the Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the Materials.
+
+ 3. Redistribution
+ You may reproduce and distribute copies of the Materials or derivative works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
+ a. You shall give any other recipients of the Materials or derivative works a copy of this Agreement;
+ b. You shall cause any modified files to carry prominent notices stating that You changed the files;
+ c. You shall retain in all copies of the Materials that You distribute the following attribution notices within a "Notice" text file distributed as a part of such copies: "Tongyi Qianwen is licensed under the Tongyi Qianwen LICENSE AGREEMENT, Copyright (c) Alibaba Cloud. All Rights Reserved."; and
+ d. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such derivative works as a whole, provided Your use, reproduction, and distribution of the work otherwise complies with the terms and conditions of this Agreement.
+
+ 4. Restrictions
+ If you are commercially using the Materials, and your product or service has more than 100 million monthly active users, You shall request a license from Us. You cannot exercise your rights under this Agreement without our express authorization.
+
+ 5. Rules of use
+ a. The Materials may be subject to export controls or restrictions in China, the United States or other countries or regions. You shall comply with applicable laws and regulations in your use of the Materials.
+ b. You can not use the Materials or any output therefrom to improve any other large language model (excluding Tongyi Qianwen or derivative works thereof).
+
+ 6. Intellectual Property
+ a. We retain ownership of all intellectual property rights in and to the Materials and derivatives made by or for Us. Conditioned upon compliance with the terms and conditions of this Agreement, with respect to any derivative works and modifications of the Materials that are made by you, you are and will be the owner of such derivative works and modifications.
+ b. No trademark license is granted to use the trade names, trademarks, service marks, or product names of Us, except as required to fulfill notice requirements under this Agreement or as required for reasonable and customary use in describing and redistributing the Materials.
+ c. If you commence a lawsuit or other proceedings (including a cross-claim or counterclaim in a lawsuit) against Us or any entity alleging that the Materials or any output therefrom, or any part of the foregoing, infringe any intellectual property or other right owned or licensable by you, then all licences granted to you under this Agreement shall terminate as of the date such lawsuit or other proceeding is commenced or brought.
+
+ 7. Disclaimer of Warranty and Limitation of Liability
+
+ a. We are not obligated to support, update, provide training for, or develop any further version of the Tongyi Qianwen Materials or to grant any license thereto.
+ b. THE MATERIALS ARE PROVIDED "AS IS" WITHOUT ANY EXPRESS OR IMPLIED WARRANTY OF ANY KIND INCLUDING WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT, OR FITNESS FOR A PARTICULAR PURPOSE. WE MAKE NO WARRANTY AND ASSUME NO RESPONSIBILITY FOR THE SAFETY OR STABILITY OF THE MATERIALS AND ANY OUTPUT THEREFROM.
+ c. IN NO EVENT SHALL WE BE LIABLE TO YOU FOR ANY DAMAGES, INCLUDING, BUT NOT LIMITED TO ANY DIRECT, OR INDIRECT, SPECIAL OR CONSEQUENTIAL DAMAGES ARISING FROM YOUR USE OR INABILITY TO USE THE MATERIALS OR ANY OUTPUT OF IT, NO MATTER HOW IT’S CAUSED.
+ d. You will defend, indemnify and hold harmless Us from and against any claim by any third party arising out of or related to your use or distribution of the Materials.
+
+ 8. Survival and Termination.
+ a. The term of this Agreement shall commence upon your acceptance of this Agreement or access to the Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein.
+ b. We may terminate this Agreement if you breach any of the terms or conditions of this Agreement. Upon termination of this Agreement, you must delete and cease use of the Materials. Sections 7 and 9 shall survive the termination of this Agreement.
+
+ 9. Governing Law and Jurisdiction.
+ a. This Agreement and any dispute arising out of or relating to it will be governed by the laws of China, without regard to conflict of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement.
+ b. The People's Courts in Hangzhou City shall have exclusive jurisdiction over any dispute arising out of this Agreement.
README.md ADDED
@@ -0,0 +1,97 @@
+ ---
+ license: other
+ license_name: tongyi-qianwen
+ license_link: >-
+   https://huggingface.co/Qwen/Qwen1.5-72B-Chat/blob/main/LICENSE
+ language:
+ - en
+ pipeline_tag: text-generation
+ tags:
+ - chat
+ ---
+
+ # Qwen1.5-72B-Chat
+
+
+ ## Introduction
+
+ Qwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. Compared with the previously released Qwen, the improvements include:
+
+ * 6 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, and 72B;
+ * Significant performance improvement in human preference for chat models;
+ * Multilingual support of both base and chat models;
+ * Stable support of 32K context length for models of all sizes;
+ * No need for `trust_remote_code`.
+
+ For more details, please refer to our [blog post](https://qwenlm.github.io/blog/qwen1.5/) and [GitHub repo](https://github.com/QwenLM/Qwen1.5).
+ <br>
+
+ ## Model Details
+ Qwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, grouped query attention, a mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adapted to multiple natural languages and code. For the beta version, we have temporarily not included GQA or the mixture of SWA and full attention.
+
+ ## Training details
+ We pretrained the models with a large amount of data, and we post-trained the models with both supervised finetuning and direct preference optimization. However, DPO leads to improvements in human preference evaluation but degradation in benchmark evaluation; we will fix both problems in the near future.
+
+ ## Requirements
+ The code for Qwen1.5 is included in the latest Hugging Face transformers, and we advise you to install `transformers>=4.37.0`, or you might encounter the following error:
+ ```
+ KeyError: 'qwen2'
+ ```
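+
+ For example (a minimal sketch; any install route that yields `transformers>=4.37.0` works):
+
+ ```
+ pip install "transformers>=4.37.0"
+ ```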
+
+ ## Quickstart
+
+ Below we provide a code snippet with `apply_chat_template` to show you how to load the tokenizer and model and how to generate content.
+
+ ```python
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ device = "cuda" # the device to load the model onto
+
+ model = AutoModelForCausalLM.from_pretrained(
+     "Qwen/Qwen1.5-72B-Chat",
+     device_map="auto"
+ )
+ tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-72B-Chat")
+
+ prompt = "Give me a short introduction to large language model."
+ messages = [
+     {"role": "system", "content": "You are a helpful assistant."},
+     {"role": "user", "content": prompt}
+ ]
+ # Render the chat messages into the model's prompt format
+ text = tokenizer.apply_chat_template(
+     messages,
+     tokenize=False,
+     add_generation_prompt=True
+ )
+ model_inputs = tokenizer([text], return_tensors="pt").to(device)
+
+ generated_ids = model.generate(
+     model_inputs.input_ids,
+     max_new_tokens=512
+ )
+ # Strip the prompt tokens so only the newly generated tokens are decoded
+ generated_ids = [
+     output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
+ ]
+
+ response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
+ ```
+
+ For quantized models, we advise you to use the GPTQ, AWQ, and GGUF counterparts, namely `Qwen1.5-72B-Chat-GPTQ-Int4`, `Qwen1.5-72B-Chat-GPTQ-Int8`, `Qwen1.5-72B-Chat-AWQ`, and `Qwen1.5-72B-Chat-GGUF`.
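+
+ As a rough sketch of how a quantized variant would be loaded (assuming the corresponding quantization backend, e.g. `auto-gptq` for the GPTQ checkpoints, is installed; the repo id below follows the naming above):
+
+ ```python
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ # Same interface as the bfloat16 checkpoint, at a fraction of the memory.
+ quantized_id = "Qwen/Qwen1.5-72B-Chat-GPTQ-Int4"
+ model = AutoModelForCausalLM.from_pretrained(quantized_id, device_map="auto")
+ tokenizer = AutoTokenizer.from_pretrained(quantized_id)
+ ```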
+
+
+ ## Tips
+
+ * If you encounter code switching or other bad cases, we advise you to use our provided hyperparameters in `generation_config.json`.
+
+
+ ## Citation
+
+ If you find our work helpful, feel free to give us a cite.
+
+ ```
+ @article{qwen,
+   title={Qwen Technical Report},
+   author={Jinze Bai and Shuai Bai and Yunfei Chu and Zeyu Cui and Kai Dang and Xiaodong Deng and Yang Fan and Wenbin Ge and Yu Han and Fei Huang and Binyuan Hui and Luo Ji and Mei Li and Junyang Lin and Runji Lin and Dayiheng Liu and Gao Liu and Chengqiang Lu and Keming Lu and Jianxin Ma and Rui Men and Xingzhang Ren and Xuancheng Ren and Chuanqi Tan and Sinan Tan and Jianhong Tu and Peng Wang and Shijie Wang and Wei Wang and Shengguang Wu and Benfeng Xu and Jin Xu and An Yang and Hao Yang and Jian Yang and Shusheng Yang and Yang Yao and Bowen Yu and Hongyi Yuan and Zheng Yuan and Jianwei Zhang and Xingxuan Zhang and Yichang Zhang and Zhenru Zhang and Chang Zhou and Jingren Zhou and Xiaohuan Zhou and Tianhang Zhu},
+   journal={arXiv preprint arXiv:2309.16609},
+   year={2023}
+ }
+ ```
config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "architectures": [
+     "Qwen2ForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 151643,
+   "eos_token_id": 151643,
+   "hidden_act": "silu",
+   "hidden_size": 8192,
+   "initializer_range": 0.02,
+   "intermediate_size": 24576,
+   "max_position_embeddings": 32768,
+   "max_window_layers": 70,
+   "model_type": "qwen2",
+   "num_attention_heads": 64,
+   "num_hidden_layers": 80,
+   "num_key_value_heads": 64,
+   "rms_norm_eps": 1e-06,
+   "rope_theta": 1000000.0,
+   "sliding_window": 32768,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.37.0",
+   "use_cache": true,
+   "use_sliding_window": false,
+   "vocab_size": 152064
+ }
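
Plugging these sizes into the standard decoder-only parameter count reproduces the advertised 72B scale. A back-of-the-envelope sketch (our own illustration, not part of the commit; it assumes untied embeddings per `"tie_word_embeddings": false` and ignores the small bias and norm terms):

```python
# Approximate parameter count from the config.json values above
vocab, hidden, inter, layers = 152064, 8192, 24576, 80

embed = vocab * hidden          # input embedding matrix
lm_head = vocab * hidden        # untied output projection
attn = 4 * hidden * hidden      # q, k, v, o projection weights per layer
mlp = 3 * hidden * inter        # gate, up, down projection weights per layer

total = embed + lm_head + layers * (attn + mlp)
print(f"~{total / 1e9:.1f}B parameters")  # ~72.3B
# Cross-check: model.safetensors.index.json reports total_size 144575840256
# bytes, i.e. ~72.3B parameters at 2 bytes each in bfloat16.
```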
generation_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "bos_token_id": 151643,
+   "do_sample": true,
+   "eos_token_id": [
+     151645,
+     151643
+   ],
+   "repetition_penalty": 1.05,
+   "temperature": 0.7,
+   "top_p": 0.8,
+   "top_k": 20,
+   "transformers_version": "4.37.0"
+ }
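
These are the sampling defaults that `model.generate()` picks up automatically when the checkpoint is loaded. A minimal sketch of inspecting or overriding them (an illustration, not part of the commit):

```python
from transformers import GenerationConfig

# Fetch the defaults shipped with the checkpoint...
gen_cfg = GenerationConfig.from_pretrained("Qwen/Qwen1.5-72B-Chat")
print(gen_cfg.temperature, gen_cfg.top_p, gen_cfg.top_k)  # 0.7 0.8 20

# ...and tweak them before passing generation_config to model.generate().
gen_cfg.temperature = 0.5  # e.g. more conservative sampling
```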
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00038.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:54b8f122f003fe931e496e7886d3ab69619e57981ccf2d1370878785e5205fac
+ size 3833644176
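
The three lines above are a Git LFS pointer (spec version, content hash, byte size), not the shard itself; the remaining 37 shard files below follow the same pattern. A minimal sketch of fetching the actual weights with `huggingface_hub` (an illustration, not part of the commit):

```python
from huggingface_hub import snapshot_download

# Resolves the LFS pointers and downloads the real shard contents.
local_dir = snapshot_download(
    "Qwen/Qwen1.5-72B-Chat",
    allow_patterns=["*.safetensors", "*.json"],  # weights plus configs
)
print(local_dir)
```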
model-00002-of-00038.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e084f2965fa257fe762b6365e256ce886637428d7fc64e0f2dcbb2500ffd24f7
+ size 3892513824
model-00003-of-00038.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2e67aff581b75af768452dc54c9831da8c2d02b0e9c1abe67d9db071edf6aab9
+ size 3892530552
model-00004-of-00038.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ad900cff56c1bd8c4f83f26631d537d3249f62443a236193d3eb30374840ef60
+ size 3624045376
model-00005-of-00038.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a5dda4d41dec1d3b14e329de5be80f6212e54e9433d40143bef892425e3c2c87
+ size 3892480832
model-00006-of-00038.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1d0344ccea12477eda1a0abb3a7158ea6c3a3abe014ed646ccb264a88622ce1a
+ size 3892480848
model-00007-of-00038.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:86b65f1bbf910ede2fcf1f1612dc744ad6a3840e489567c24f82cd7eda18052b
+ size 3892513856
model-00008-of-00038.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5b5d475d6a8bac268d1c5a114b29aaab9937d4b087aa280de4a1ce7631ade8ce
+ size 3892530576
model-00009-of-00038.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e13d7b967105580172e3fb37c5c0adf3739d22035e57472989233c9a3ba3e536
+ size 3624045400
model-00010-of-00038.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c79dcb62fc43d6219b44fbf9a6696724e11492364d790ae1aa94493779b44aa5
+ size 3892480856
model-00011-of-00038.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9b081a6a8d306caf8103b529b86ce1b08ea78416c2c479c85c10601cc5c5ff15
+ size 3892480848
model-00012-of-00038.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:79e1b4f8a85aae6f8ca909c940d58d3723bbef351a1fc017ca867183922919fa
+ size 3892513856
model-00013-of-00038.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2a3a51a9d6a5d2cfc0b52a23299dff97b398409a7a9df584dee2f0e646e033f4
+ size 3892530576
model-00014-of-00038.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a943afda0de57879229a4010018df2b3d5095c2ae6a9a1b464e751af0e6b4250
+ size 3624045400
model-00015-of-00038.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea4f0698740efa2566b46df85c802b2f83bc0607ec3da951c24bbedde722729e
+ size 3892480856
model-00016-of-00038.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c836f23b31da344db5ee984d8b5d0cfce1375cc695ffb646b7dd7243cee37c2
+ size 3892480848
model-00017-of-00038.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1d79b147972cce726d4939e4c0f85f06e650bd42cb2081717f17dd30ea20af40
+ size 3892513856
model-00018-of-00038.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:80cd21486aa4bb100c32698cfba48590bfc6973c2b7f4b89abffcff860ee229b
+ size 3892530576
model-00019-of-00038.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:940ddd4ada0348a1e0d381657c330ef115ac67e0fc89e3ceda64dd376c34c1fc
+ size 3624045400
model-00020-of-00038.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b5863e99060c79bac0d121ed93fccefcf8388ad780b62ff325ff88615640ba66
+ size 3892480856
model-00021-of-00038.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2fdc54f92c843b74e200cf272956379930c27d5057c46ff2e5a41b1cdb5264f1
+ size 3892480848
model-00022-of-00038.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:da613141e018406962c767d5b5d8ede5e5a3d4c7a1990fcfb3c3b47f2885a6bc
+ size 3892513856
model-00023-of-00038.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4c3baf8c919a7b55263bc483ad0d120ebf42ae5c815ecbb4f9abe4c5f3a3c742
+ size 3892530576
model-00024-of-00038.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a7b89da8bc143c9cac70ca01c098175252e1df062373e81676ade6f7d76a1152
+ size 3624045400
model-00025-of-00038.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:73295487c4aca41ca678ec4f880d15aa9c15078184116dcfa384de303924e824
+ size 3892480856
model-00026-of-00038.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:972302b29569bca5d94dffc91c4327862c71dafd785b36687673b089103e548a
+ size 3892480848
model-00027-of-00038.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a50f7c65ba265f696b4cc6b95b228167f8e6e8d5af2c53ffa138a3ddf470375c
+ size 3892513856
model-00028-of-00038.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f89e8abc271a613d9194a85902ea8543afb300363549532fb93367535cbb9ee0
+ size 3892530576
model-00029-of-00038.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cd1f60737ce540b8f43ad27901bd75a1ed8dff46cc0b5e8b566755c7a573f0c6
+ size 3624045400
model-00030-of-00038.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:901b54e4cdcf25784c4fe7642a105dc7975bbba8fb4b01f37b0a6122482f8844
+ size 3892480856
model-00031-of-00038.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:38e2b7bc0af5e857971e3258c4517a080593c6f16a0ff72e5f7e2caab0d58ccf
+ size 3892480848
model-00032-of-00038.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:52338a830f37a71d9300cfc5395628d09af0f1deef6bf3ef8727cd0177e39758
+ size 3892513856
model-00033-of-00038.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:46bd2679867606608f2082b4e8f7f314756cdbbf15271174e91903473015297f
+ size 3892530576
model-00034-of-00038.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9ce3ea6308a606cce3d7108450a0bcf7cf761f235e033e7e967e31e714715e34
+ size 3624045400
model-00035-of-00038.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0fc6d7c383d35ff6c31b780f4e9f3e33523566f1f01af1c3d87545c872ddcf17
+ size 3892480856
model-00036-of-00038.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c7f16cc91e24bbd1bc182154a7c9425f7ac9640a1236e2ec0cbcb768267309a
+ size 3892480848
model-00037-of-00038.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:04d7dd9ac82f550d452dccd4a6848894bc9f813c8dd933d538dbae7cb68824cd
+ size 3892530328
model-00038-of-00038.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e7965a615fb7480851cee4edf83ecde39515726d058a2bba85c1b47ed5895b49
+ size 2491416704
model.safetensors.index.json ADDED
@@ -0,0 +1,970 @@
+ {
+ "metadata": {
+ "total_size": 144575840256
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00038-of-00038.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00038.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00002-of-00038.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00002-of-00038.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00038.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00038.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00002-of-00038.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00038.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00038.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00038.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00038.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00038.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00038.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00038.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00002-of-00038.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00002-of-00038.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00002-of-00038.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00002-of-00038.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00002-of-00038.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00002-of-00038.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00002-of-00038.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00002-of-00038.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00002-of-00038.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00002-of-00038.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00002-of-00038.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00002-of-00038.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00006-of-00038.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00006-of-00038.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00006-of-00038.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00006-of-00038.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00006-of-00038.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00006-of-00038.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00006-of-00038.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00006-of-00038.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00006-of-00038.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00006-of-00038.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00006-of-00038.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00006-of-00038.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00007-of-00038.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00007-of-00038.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00006-of-00038.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00006-of-00038.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00007-of-00038.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00006-of-00038.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00006-of-00038.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00006-of-00038.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00006-of-00038.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00006-of-00038.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00006-of-00038.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00006-of-00038.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00007-of-00038.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00007-of-00038.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00007-of-00038.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00007-of-00038.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00007-of-00038.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00007-of-00038.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00007-of-00038.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00007-of-00038.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00007-of-00038.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00007-of-00038.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00007-of-00038.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00007-of-00038.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00007-of-00038.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00007-of-00038.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00007-of-00038.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00007-of-00038.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00007-of-00038.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00007-of-00038.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00007-of-00038.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00007-of-00038.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00007-of-00038.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00007-of-00038.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00007-of-00038.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00007-of-00038.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00008-of-00038.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00008-of-00038.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00008-of-00038.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00008-of-00038.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00008-of-00038.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00008-of-00038.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00008-of-00038.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00008-of-00038.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00008-of-00038.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00008-of-00038.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00008-of-00038.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00008-of-00038.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00008-of-00038.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00008-of-00038.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00008-of-00038.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00008-of-00038.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00008-of-00038.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00008-of-00038.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00008-of-00038.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00008-of-00038.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00008-of-00038.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00008-of-00038.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00008-of-00038.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00008-of-00038.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00009-of-00038.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00009-of-00038.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00009-of-00038.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00009-of-00038.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00009-of-00038.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00008-of-00038.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00008-of-00038.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00009-of-00038.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00008-of-00038.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00008-of-00038.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00008-of-00038.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00008-of-00038.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00009-of-00038.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00009-of-00038.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00009-of-00038.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00009-of-00038.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00009-of-00038.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00009-of-00038.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00009-of-00038.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00009-of-00038.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00009-of-00038.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00009-of-00038.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00009-of-00038.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00009-of-00038.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00010-of-00038.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00010-of-00038.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00010-of-00038.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00010-of-00038.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00010-of-00038.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00009-of-00038.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00009-of-00038.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00009-of-00038.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00009-of-00038.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00009-of-00038.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00009-of-00038.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00009-of-00038.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00010-of-00038.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00010-of-00038.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00010-of-00038.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00010-of-00038.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00010-of-00038.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00010-of-00038.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00010-of-00038.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00010-of-00038.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00010-of-00038.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00010-of-00038.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00010-of-00038.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00010-of-00038.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00002-of-00038.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00002-of-00038.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00002-of-00038.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00002-of-00038.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00002-of-00038.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00002-of-00038.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00002-of-00038.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00002-of-00038.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00002-of-00038.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00002-of-00038.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00002-of-00038.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00002-of-00038.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00011-of-00038.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00011-of-00038.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00010-of-00038.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00011-of-00038.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00011-of-00038.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00010-of-00038.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00010-of-00038.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00010-of-00038.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00010-of-00038.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00010-of-00038.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00010-of-00038.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00010-of-00038.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00011-of-00038.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00011-of-00038.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00011-of-00038.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00011-of-00038.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00011-of-00038.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00011-of-00038.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00011-of-00038.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00011-of-00038.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00011-of-00038.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00011-of-00038.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00011-of-00038.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00011-of-00038.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00012-of-00038.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00012-of-00038.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00011-of-00038.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00011-of-00038.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00012-of-00038.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00011-of-00038.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00011-of-00038.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00011-of-00038.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00011-of-00038.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00011-of-00038.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00011-of-00038.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00011-of-00038.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00012-of-00038.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00012-of-00038.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00012-of-00038.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00012-of-00038.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00012-of-00038.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00012-of-00038.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00012-of-00038.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00012-of-00038.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00012-of-00038.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00012-of-00038.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00012-of-00038.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00012-of-00038.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00012-of-00038.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00012-of-00038.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00012-of-00038.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00012-of-00038.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00012-of-00038.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00012-of-00038.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00012-of-00038.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00012-of-00038.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00012-of-00038.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00012-of-00038.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00012-of-00038.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00012-of-00038.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00013-of-00038.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00013-of-00038.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00013-of-00038.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00013-of-00038.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00013-of-00038.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00013-of-00038.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00013-of-00038.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00013-of-00038.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00013-of-00038.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00013-of-00038.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00013-of-00038.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00013-of-00038.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00013-of-00038.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00013-of-00038.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00013-of-00038.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00013-of-00038.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00013-of-00038.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00013-of-00038.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00013-of-00038.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00013-of-00038.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00013-of-00038.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00013-of-00038.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00013-of-00038.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00013-of-00038.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00014-of-00038.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00014-of-00038.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00014-of-00038.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00014-of-00038.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00014-of-00038.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00013-of-00038.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00013-of-00038.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00014-of-00038.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00013-of-00038.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00013-of-00038.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00013-of-00038.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00013-of-00038.safetensors",
+ "model.layers.28.input_layernorm.weight": "model-00014-of-00038.safetensors",
+ "model.layers.28.mlp.down_proj.weight": "model-00014-of-00038.safetensors",
+ "model.layers.28.mlp.gate_proj.weight": "model-00014-of-00038.safetensors",
+ "model.layers.28.mlp.up_proj.weight": "model-00014-of-00038.safetensors",
+ "model.layers.28.post_attention_layernorm.weight": "model-00014-of-00038.safetensors",
+ "model.layers.28.self_attn.k_proj.bias": "model-00014-of-00038.safetensors",
+ "model.layers.28.self_attn.k_proj.weight": "model-00014-of-00038.safetensors",
+ "model.layers.28.self_attn.o_proj.weight": "model-00014-of-00038.safetensors",
+ "model.layers.28.self_attn.q_proj.bias": "model-00014-of-00038.safetensors",
+ "model.layers.28.self_attn.q_proj.weight": "model-00014-of-00038.safetensors",
+ "model.layers.28.self_attn.v_proj.bias": "model-00014-of-00038.safetensors",
+ "model.layers.28.self_attn.v_proj.weight": "model-00014-of-00038.safetensors",
+ "model.layers.29.input_layernorm.weight": "model-00015-of-00038.safetensors",
+ "model.layers.29.mlp.down_proj.weight": "model-00015-of-00038.safetensors",
+ "model.layers.29.mlp.gate_proj.weight": "model-00015-of-00038.safetensors",
+ "model.layers.29.mlp.up_proj.weight": "model-00015-of-00038.safetensors",
+ "model.layers.29.post_attention_layernorm.weight": "model-00015-of-00038.safetensors",
+ "model.layers.29.self_attn.k_proj.bias": "model-00014-of-00038.safetensors",
+ "model.layers.29.self_attn.k_proj.weight": "model-00014-of-00038.safetensors",
+ "model.layers.29.self_attn.o_proj.weight": "model-00014-of-00038.safetensors",
+ "model.layers.29.self_attn.q_proj.bias": "model-00014-of-00038.safetensors",
+ "model.layers.29.self_attn.q_proj.weight": "model-00014-of-00038.safetensors",
+ "model.layers.29.self_attn.v_proj.bias": "model-00014-of-00038.safetensors",
+ "model.layers.29.self_attn.v_proj.weight": "model-00014-of-00038.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00003-of-00038.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00003-of-00038.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00003-of-00038.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00003-of-00038.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00003-of-00038.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00003-of-00038.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00003-of-00038.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00003-of-00038.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00003-of-00038.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00003-of-00038.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00003-of-00038.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00003-of-00038.safetensors",
+ "model.layers.30.input_layernorm.weight": "model-00015-of-00038.safetensors",
+ "model.layers.30.mlp.down_proj.weight": "model-00015-of-00038.safetensors",
+ "model.layers.30.mlp.gate_proj.weight": "model-00015-of-00038.safetensors",
+ "model.layers.30.mlp.up_proj.weight": "model-00015-of-00038.safetensors",
+ "model.layers.30.post_attention_layernorm.weight": "model-00015-of-00038.safetensors",
+ "model.layers.30.self_attn.k_proj.bias": "model-00015-of-00038.safetensors",
+ "model.layers.30.self_attn.k_proj.weight": "model-00015-of-00038.safetensors",
+ "model.layers.30.self_attn.o_proj.weight": "model-00015-of-00038.safetensors",
+ "model.layers.30.self_attn.q_proj.bias": "model-00015-of-00038.safetensors",
+ "model.layers.30.self_attn.q_proj.weight": "model-00015-of-00038.safetensors",
+ "model.layers.30.self_attn.v_proj.bias": "model-00015-of-00038.safetensors",
+ "model.layers.30.self_attn.v_proj.weight": "model-00015-of-00038.safetensors",
+ "model.layers.31.input_layernorm.weight": "model-00016-of-00038.safetensors",
+ "model.layers.31.mlp.down_proj.weight": "model-00016-of-00038.safetensors",
+ "model.layers.31.mlp.gate_proj.weight": "model-00015-of-00038.safetensors",
+ "model.layers.31.mlp.up_proj.weight": "model-00016-of-00038.safetensors",
+ "model.layers.31.post_attention_layernorm.weight": "model-00016-of-00038.safetensors",
+ "model.layers.31.self_attn.k_proj.bias": "model-00015-of-00038.safetensors",
+ "model.layers.31.self_attn.k_proj.weight": "model-00015-of-00038.safetensors",
+ "model.layers.31.self_attn.o_proj.weight": "model-00015-of-00038.safetensors",
+ "model.layers.31.self_attn.q_proj.bias": "model-00015-of-00038.safetensors",
+ "model.layers.31.self_attn.q_proj.weight": "model-00015-of-00038.safetensors",
+ "model.layers.31.self_attn.v_proj.bias": "model-00015-of-00038.safetensors",
+ "model.layers.31.self_attn.v_proj.weight": "model-00015-of-00038.safetensors",
+ "model.layers.32.input_layernorm.weight": "model-00016-of-00038.safetensors",
+ "model.layers.32.mlp.down_proj.weight": "model-00016-of-00038.safetensors",
+ "model.layers.32.mlp.gate_proj.weight": "model-00016-of-00038.safetensors",
+ "model.layers.32.mlp.up_proj.weight": "model-00016-of-00038.safetensors",
+ "model.layers.32.post_attention_layernorm.weight": "model-00016-of-00038.safetensors",
+ "model.layers.32.self_attn.k_proj.bias": "model-00016-of-00038.safetensors",
+ "model.layers.32.self_attn.k_proj.weight": "model-00016-of-00038.safetensors",
+ "model.layers.32.self_attn.o_proj.weight": "model-00016-of-00038.safetensors",
+ "model.layers.32.self_attn.q_proj.bias": "model-00016-of-00038.safetensors",
+ "model.layers.32.self_attn.q_proj.weight": "model-00016-of-00038.safetensors",
+ "model.layers.32.self_attn.v_proj.bias": "model-00016-of-00038.safetensors",
+ "model.layers.32.self_attn.v_proj.weight": "model-00016-of-00038.safetensors",
+ "model.layers.33.input_layernorm.weight": "model-00017-of-00038.safetensors",
+ "model.layers.33.mlp.down_proj.weight": "model-00017-of-00038.safetensors",
+ "model.layers.33.mlp.gate_proj.weight": "model-00016-of-00038.safetensors",
+ "model.layers.33.mlp.up_proj.weight": "model-00016-of-00038.safetensors",
+ "model.layers.33.post_attention_layernorm.weight": "model-00017-of-00038.safetensors",
+ "model.layers.33.self_attn.k_proj.bias": "model-00016-of-00038.safetensors",
+ "model.layers.33.self_attn.k_proj.weight": "model-00016-of-00038.safetensors",
+ "model.layers.33.self_attn.o_proj.weight": "model-00016-of-00038.safetensors",
+ "model.layers.33.self_attn.q_proj.bias": "model-00016-of-00038.safetensors",
+ "model.layers.33.self_attn.q_proj.weight": "model-00016-of-00038.safetensors",
+ "model.layers.33.self_attn.v_proj.bias": "model-00016-of-00038.safetensors",
+ "model.layers.33.self_attn.v_proj.weight": "model-00016-of-00038.safetensors",
+ "model.layers.34.input_layernorm.weight": "model-00017-of-00038.safetensors",
+ "model.layers.34.mlp.down_proj.weight": "model-00017-of-00038.safetensors",
+ "model.layers.34.mlp.gate_proj.weight": "model-00017-of-00038.safetensors",
+ "model.layers.34.mlp.up_proj.weight": "model-00017-of-00038.safetensors",
+ "model.layers.34.post_attention_layernorm.weight": "model-00017-of-00038.safetensors",
+ "model.layers.34.self_attn.k_proj.bias": "model-00017-of-00038.safetensors",
+ "model.layers.34.self_attn.k_proj.weight": "model-00017-of-00038.safetensors",
+ "model.layers.34.self_attn.o_proj.weight": "model-00017-of-00038.safetensors",
+ "model.layers.34.self_attn.q_proj.bias": "model-00017-of-00038.safetensors",
+ "model.layers.34.self_attn.q_proj.weight": "model-00017-of-00038.safetensors",
+ "model.layers.34.self_attn.v_proj.bias": "model-00017-of-00038.safetensors",
+ "model.layers.34.self_attn.v_proj.weight": "model-00017-of-00038.safetensors",
+ "model.layers.35.input_layernorm.weight": "model-00017-of-00038.safetensors",
+ "model.layers.35.mlp.down_proj.weight": "model-00017-of-00038.safetensors",
+ "model.layers.35.mlp.gate_proj.weight": "model-00017-of-00038.safetensors",
+ "model.layers.35.mlp.up_proj.weight": "model-00017-of-00038.safetensors",
+ "model.layers.35.post_attention_layernorm.weight": "model-00017-of-00038.safetensors",
+ "model.layers.35.self_attn.k_proj.bias": "model-00017-of-00038.safetensors",
+ "model.layers.35.self_attn.k_proj.weight": "model-00017-of-00038.safetensors",
+ "model.layers.35.self_attn.o_proj.weight": "model-00017-of-00038.safetensors",
+ "model.layers.35.self_attn.q_proj.bias": "model-00017-of-00038.safetensors",
+ "model.layers.35.self_attn.q_proj.weight": "model-00017-of-00038.safetensors",
+ "model.layers.35.self_attn.v_proj.bias": "model-00017-of-00038.safetensors",
+ "model.layers.35.self_attn.v_proj.weight": "model-00017-of-00038.safetensors",
+ "model.layers.36.input_layernorm.weight": "model-00018-of-00038.safetensors",
+ "model.layers.36.mlp.down_proj.weight": "model-00018-of-00038.safetensors",
+ "model.layers.36.mlp.gate_proj.weight": "model-00018-of-00038.safetensors",
+ "model.layers.36.mlp.up_proj.weight": "model-00018-of-00038.safetensors",
+ "model.layers.36.post_attention_layernorm.weight": "model-00018-of-00038.safetensors",
+ "model.layers.36.self_attn.k_proj.bias": "model-00018-of-00038.safetensors",
+ "model.layers.36.self_attn.k_proj.weight": "model-00018-of-00038.safetensors",
+ "model.layers.36.self_attn.o_proj.weight": "model-00018-of-00038.safetensors",
+ "model.layers.36.self_attn.q_proj.bias": "model-00018-of-00038.safetensors",
+ "model.layers.36.self_attn.q_proj.weight": "model-00018-of-00038.safetensors",
+ "model.layers.36.self_attn.v_proj.bias": "model-00018-of-00038.safetensors",
+ "model.layers.36.self_attn.v_proj.weight": "model-00018-of-00038.safetensors",
+ "model.layers.37.input_layernorm.weight": "model-00018-of-00038.safetensors",
+ "model.layers.37.mlp.down_proj.weight": "model-00018-of-00038.safetensors",
+ "model.layers.37.mlp.gate_proj.weight": "model-00018-of-00038.safetensors",
+ "model.layers.37.mlp.up_proj.weight": "model-00018-of-00038.safetensors",
+ "model.layers.37.post_attention_layernorm.weight": "model-00018-of-00038.safetensors",
+ "model.layers.37.self_attn.k_proj.bias": "model-00018-of-00038.safetensors",
+ "model.layers.37.self_attn.k_proj.weight": "model-00018-of-00038.safetensors",
+ "model.layers.37.self_attn.o_proj.weight": "model-00018-of-00038.safetensors",
+ "model.layers.37.self_attn.q_proj.bias": "model-00018-of-00038.safetensors",
+ "model.layers.37.self_attn.q_proj.weight": "model-00018-of-00038.safetensors",
+ "model.layers.37.self_attn.v_proj.bias": "model-00018-of-00038.safetensors",
+ "model.layers.37.self_attn.v_proj.weight": "model-00018-of-00038.safetensors",
+ "model.layers.38.input_layernorm.weight": "model-00019-of-00038.safetensors",
+ "model.layers.38.mlp.down_proj.weight": "model-00019-of-00038.safetensors",
+ "model.layers.38.mlp.gate_proj.weight": "model-00019-of-00038.safetensors",
+ "model.layers.38.mlp.up_proj.weight": "model-00019-of-00038.safetensors",
+ "model.layers.38.post_attention_layernorm.weight": "model-00019-of-00038.safetensors",
+ "model.layers.38.self_attn.k_proj.bias": "model-00018-of-00038.safetensors",
+ "model.layers.38.self_attn.k_proj.weight": "model-00018-of-00038.safetensors",
+ "model.layers.38.self_attn.o_proj.weight": "model-00019-of-00038.safetensors",
+ "model.layers.38.self_attn.q_proj.bias": "model-00018-of-00038.safetensors",
+ "model.layers.38.self_attn.q_proj.weight": "model-00018-of-00038.safetensors",
402
+ "model.layers.38.self_attn.v_proj.bias": "model-00018-of-00038.safetensors",
403
+ "model.layers.38.self_attn.v_proj.weight": "model-00018-of-00038.safetensors",
404
+ "model.layers.39.input_layernorm.weight": "model-00019-of-00038.safetensors",
405
+ "model.layers.39.mlp.down_proj.weight": "model-00019-of-00038.safetensors",
406
+ "model.layers.39.mlp.gate_proj.weight": "model-00019-of-00038.safetensors",
407
+ "model.layers.39.mlp.up_proj.weight": "model-00019-of-00038.safetensors",
408
+ "model.layers.39.post_attention_layernorm.weight": "model-00019-of-00038.safetensors",
409
+ "model.layers.39.self_attn.k_proj.bias": "model-00019-of-00038.safetensors",
410
+ "model.layers.39.self_attn.k_proj.weight": "model-00019-of-00038.safetensors",
411
+ "model.layers.39.self_attn.o_proj.weight": "model-00019-of-00038.safetensors",
412
+ "model.layers.39.self_attn.q_proj.bias": "model-00019-of-00038.safetensors",
413
+ "model.layers.39.self_attn.q_proj.weight": "model-00019-of-00038.safetensors",
414
+ "model.layers.39.self_attn.v_proj.bias": "model-00019-of-00038.safetensors",
415
+ "model.layers.39.self_attn.v_proj.weight": "model-00019-of-00038.safetensors",
416
+ "model.layers.4.input_layernorm.weight": "model-00003-of-00038.safetensors",
417
+ "model.layers.4.mlp.down_proj.weight": "model-00003-of-00038.safetensors",
418
+ "model.layers.4.mlp.gate_proj.weight": "model-00003-of-00038.safetensors",
419
+ "model.layers.4.mlp.up_proj.weight": "model-00003-of-00038.safetensors",
420
+ "model.layers.4.post_attention_layernorm.weight": "model-00003-of-00038.safetensors",
421
+ "model.layers.4.self_attn.k_proj.bias": "model-00003-of-00038.safetensors",
422
+ "model.layers.4.self_attn.k_proj.weight": "model-00003-of-00038.safetensors",
423
+ "model.layers.4.self_attn.o_proj.weight": "model-00003-of-00038.safetensors",
424
+ "model.layers.4.self_attn.q_proj.bias": "model-00003-of-00038.safetensors",
425
+ "model.layers.4.self_attn.q_proj.weight": "model-00003-of-00038.safetensors",
426
+ "model.layers.4.self_attn.v_proj.bias": "model-00003-of-00038.safetensors",
427
+ "model.layers.4.self_attn.v_proj.weight": "model-00003-of-00038.safetensors",
428
+ "model.layers.40.input_layernorm.weight": "model-00020-of-00038.safetensors",
429
+ "model.layers.40.mlp.down_proj.weight": "model-00020-of-00038.safetensors",
430
+ "model.layers.40.mlp.gate_proj.weight": "model-00020-of-00038.safetensors",
431
+ "model.layers.40.mlp.up_proj.weight": "model-00020-of-00038.safetensors",
432
+ "model.layers.40.post_attention_layernorm.weight": "model-00020-of-00038.safetensors",
433
+ "model.layers.40.self_attn.k_proj.bias": "model-00019-of-00038.safetensors",
434
+ "model.layers.40.self_attn.k_proj.weight": "model-00019-of-00038.safetensors",
435
+ "model.layers.40.self_attn.o_proj.weight": "model-00019-of-00038.safetensors",
436
+ "model.layers.40.self_attn.q_proj.bias": "model-00019-of-00038.safetensors",
437
+ "model.layers.40.self_attn.q_proj.weight": "model-00019-of-00038.safetensors",
438
+ "model.layers.40.self_attn.v_proj.bias": "model-00019-of-00038.safetensors",
439
+ "model.layers.40.self_attn.v_proj.weight": "model-00019-of-00038.safetensors",
440
+ "model.layers.41.input_layernorm.weight": "model-00020-of-00038.safetensors",
441
+ "model.layers.41.mlp.down_proj.weight": "model-00020-of-00038.safetensors",
442
+ "model.layers.41.mlp.gate_proj.weight": "model-00020-of-00038.safetensors",
443
+ "model.layers.41.mlp.up_proj.weight": "model-00020-of-00038.safetensors",
444
+ "model.layers.41.post_attention_layernorm.weight": "model-00020-of-00038.safetensors",
445
+ "model.layers.41.self_attn.k_proj.bias": "model-00020-of-00038.safetensors",
446
+ "model.layers.41.self_attn.k_proj.weight": "model-00020-of-00038.safetensors",
447
+ "model.layers.41.self_attn.o_proj.weight": "model-00020-of-00038.safetensors",
448
+ "model.layers.41.self_attn.q_proj.bias": "model-00020-of-00038.safetensors",
449
+ "model.layers.41.self_attn.q_proj.weight": "model-00020-of-00038.safetensors",
450
+ "model.layers.41.self_attn.v_proj.bias": "model-00020-of-00038.safetensors",
451
+ "model.layers.41.self_attn.v_proj.weight": "model-00020-of-00038.safetensors",
452
+ "model.layers.42.input_layernorm.weight": "model-00021-of-00038.safetensors",
453
+ "model.layers.42.mlp.down_proj.weight": "model-00021-of-00038.safetensors",
454
+ "model.layers.42.mlp.gate_proj.weight": "model-00020-of-00038.safetensors",
455
+ "model.layers.42.mlp.up_proj.weight": "model-00021-of-00038.safetensors",
456
+ "model.layers.42.post_attention_layernorm.weight": "model-00021-of-00038.safetensors",
457
+ "model.layers.42.self_attn.k_proj.bias": "model-00020-of-00038.safetensors",
458
+ "model.layers.42.self_attn.k_proj.weight": "model-00020-of-00038.safetensors",
459
+ "model.layers.42.self_attn.o_proj.weight": "model-00020-of-00038.safetensors",
460
+ "model.layers.42.self_attn.q_proj.bias": "model-00020-of-00038.safetensors",
461
+ "model.layers.42.self_attn.q_proj.weight": "model-00020-of-00038.safetensors",
462
+ "model.layers.42.self_attn.v_proj.bias": "model-00020-of-00038.safetensors",
463
+ "model.layers.42.self_attn.v_proj.weight": "model-00020-of-00038.safetensors",
464
+ "model.layers.43.input_layernorm.weight": "model-00021-of-00038.safetensors",
465
+ "model.layers.43.mlp.down_proj.weight": "model-00021-of-00038.safetensors",
466
+ "model.layers.43.mlp.gate_proj.weight": "model-00021-of-00038.safetensors",
467
+ "model.layers.43.mlp.up_proj.weight": "model-00021-of-00038.safetensors",
468
+ "model.layers.43.post_attention_layernorm.weight": "model-00021-of-00038.safetensors",
469
+ "model.layers.43.self_attn.k_proj.bias": "model-00021-of-00038.safetensors",
470
+ "model.layers.43.self_attn.k_proj.weight": "model-00021-of-00038.safetensors",
471
+ "model.layers.43.self_attn.o_proj.weight": "model-00021-of-00038.safetensors",
472
+ "model.layers.43.self_attn.q_proj.bias": "model-00021-of-00038.safetensors",
473
+ "model.layers.43.self_attn.q_proj.weight": "model-00021-of-00038.safetensors",
474
+ "model.layers.43.self_attn.v_proj.bias": "model-00021-of-00038.safetensors",
475
+ "model.layers.43.self_attn.v_proj.weight": "model-00021-of-00038.safetensors",
476
+ "model.layers.44.input_layernorm.weight": "model-00022-of-00038.safetensors",
477
+ "model.layers.44.mlp.down_proj.weight": "model-00022-of-00038.safetensors",
478
+ "model.layers.44.mlp.gate_proj.weight": "model-00021-of-00038.safetensors",
479
+ "model.layers.44.mlp.up_proj.weight": "model-00021-of-00038.safetensors",
480
+ "model.layers.44.post_attention_layernorm.weight": "model-00022-of-00038.safetensors",
481
+ "model.layers.44.self_attn.k_proj.bias": "model-00021-of-00038.safetensors",
482
+ "model.layers.44.self_attn.k_proj.weight": "model-00021-of-00038.safetensors",
483
+ "model.layers.44.self_attn.o_proj.weight": "model-00021-of-00038.safetensors",
484
+ "model.layers.44.self_attn.q_proj.bias": "model-00021-of-00038.safetensors",
485
+ "model.layers.44.self_attn.q_proj.weight": "model-00021-of-00038.safetensors",
486
+ "model.layers.44.self_attn.v_proj.bias": "model-00021-of-00038.safetensors",
487
+ "model.layers.44.self_attn.v_proj.weight": "model-00021-of-00038.safetensors",
488
+ "model.layers.45.input_layernorm.weight": "model-00022-of-00038.safetensors",
489
+ "model.layers.45.mlp.down_proj.weight": "model-00022-of-00038.safetensors",
490
+ "model.layers.45.mlp.gate_proj.weight": "model-00022-of-00038.safetensors",
491
+ "model.layers.45.mlp.up_proj.weight": "model-00022-of-00038.safetensors",
492
+ "model.layers.45.post_attention_layernorm.weight": "model-00022-of-00038.safetensors",
493
+ "model.layers.45.self_attn.k_proj.bias": "model-00022-of-00038.safetensors",
494
+ "model.layers.45.self_attn.k_proj.weight": "model-00022-of-00038.safetensors",
495
+ "model.layers.45.self_attn.o_proj.weight": "model-00022-of-00038.safetensors",
496
+ "model.layers.45.self_attn.q_proj.bias": "model-00022-of-00038.safetensors",
497
+ "model.layers.45.self_attn.q_proj.weight": "model-00022-of-00038.safetensors",
498
+ "model.layers.45.self_attn.v_proj.bias": "model-00022-of-00038.safetensors",
499
+ "model.layers.45.self_attn.v_proj.weight": "model-00022-of-00038.safetensors",
500
+ "model.layers.46.input_layernorm.weight": "model-00022-of-00038.safetensors",
501
+ "model.layers.46.mlp.down_proj.weight": "model-00022-of-00038.safetensors",
502
+ "model.layers.46.mlp.gate_proj.weight": "model-00022-of-00038.safetensors",
503
+ "model.layers.46.mlp.up_proj.weight": "model-00022-of-00038.safetensors",
504
+ "model.layers.46.post_attention_layernorm.weight": "model-00022-of-00038.safetensors",
505
+ "model.layers.46.self_attn.k_proj.bias": "model-00022-of-00038.safetensors",
506
+ "model.layers.46.self_attn.k_proj.weight": "model-00022-of-00038.safetensors",
507
+ "model.layers.46.self_attn.o_proj.weight": "model-00022-of-00038.safetensors",
508
+ "model.layers.46.self_attn.q_proj.bias": "model-00022-of-00038.safetensors",
509
+ "model.layers.46.self_attn.q_proj.weight": "model-00022-of-00038.safetensors",
510
+ "model.layers.46.self_attn.v_proj.bias": "model-00022-of-00038.safetensors",
511
+ "model.layers.46.self_attn.v_proj.weight": "model-00022-of-00038.safetensors",
512
+ "model.layers.47.input_layernorm.weight": "model-00023-of-00038.safetensors",
513
+ "model.layers.47.mlp.down_proj.weight": "model-00023-of-00038.safetensors",
514
+ "model.layers.47.mlp.gate_proj.weight": "model-00023-of-00038.safetensors",
515
+ "model.layers.47.mlp.up_proj.weight": "model-00023-of-00038.safetensors",
516
+ "model.layers.47.post_attention_layernorm.weight": "model-00023-of-00038.safetensors",
517
+ "model.layers.47.self_attn.k_proj.bias": "model-00023-of-00038.safetensors",
518
+ "model.layers.47.self_attn.k_proj.weight": "model-00023-of-00038.safetensors",
519
+ "model.layers.47.self_attn.o_proj.weight": "model-00023-of-00038.safetensors",
520
+ "model.layers.47.self_attn.q_proj.bias": "model-00023-of-00038.safetensors",
521
+ "model.layers.47.self_attn.q_proj.weight": "model-00023-of-00038.safetensors",
522
+ "model.layers.47.self_attn.v_proj.bias": "model-00023-of-00038.safetensors",
523
+ "model.layers.47.self_attn.v_proj.weight": "model-00023-of-00038.safetensors",
524
+ "model.layers.48.input_layernorm.weight": "model-00023-of-00038.safetensors",
525
+ "model.layers.48.mlp.down_proj.weight": "model-00023-of-00038.safetensors",
526
+ "model.layers.48.mlp.gate_proj.weight": "model-00023-of-00038.safetensors",
527
+ "model.layers.48.mlp.up_proj.weight": "model-00023-of-00038.safetensors",
528
+ "model.layers.48.post_attention_layernorm.weight": "model-00023-of-00038.safetensors",
529
+ "model.layers.48.self_attn.k_proj.bias": "model-00023-of-00038.safetensors",
530
+ "model.layers.48.self_attn.k_proj.weight": "model-00023-of-00038.safetensors",
531
+ "model.layers.48.self_attn.o_proj.weight": "model-00023-of-00038.safetensors",
532
+ "model.layers.48.self_attn.q_proj.bias": "model-00023-of-00038.safetensors",
533
+ "model.layers.48.self_attn.q_proj.weight": "model-00023-of-00038.safetensors",
534
+ "model.layers.48.self_attn.v_proj.bias": "model-00023-of-00038.safetensors",
535
+ "model.layers.48.self_attn.v_proj.weight": "model-00023-of-00038.safetensors",
536
+ "model.layers.49.input_layernorm.weight": "model-00024-of-00038.safetensors",
537
+ "model.layers.49.mlp.down_proj.weight": "model-00024-of-00038.safetensors",
538
+ "model.layers.49.mlp.gate_proj.weight": "model-00024-of-00038.safetensors",
539
+ "model.layers.49.mlp.up_proj.weight": "model-00024-of-00038.safetensors",
540
+ "model.layers.49.post_attention_layernorm.weight": "model-00024-of-00038.safetensors",
541
+ "model.layers.49.self_attn.k_proj.bias": "model-00023-of-00038.safetensors",
542
+ "model.layers.49.self_attn.k_proj.weight": "model-00023-of-00038.safetensors",
543
+ "model.layers.49.self_attn.o_proj.weight": "model-00024-of-00038.safetensors",
544
+ "model.layers.49.self_attn.q_proj.bias": "model-00023-of-00038.safetensors",
545
+ "model.layers.49.self_attn.q_proj.weight": "model-00023-of-00038.safetensors",
546
+ "model.layers.49.self_attn.v_proj.bias": "model-00023-of-00038.safetensors",
547
+ "model.layers.49.self_attn.v_proj.weight": "model-00023-of-00038.safetensors",
548
+ "model.layers.5.input_layernorm.weight": "model-00004-of-00038.safetensors",
549
+ "model.layers.5.mlp.down_proj.weight": "model-00004-of-00038.safetensors",
550
+ "model.layers.5.mlp.gate_proj.weight": "model-00004-of-00038.safetensors",
551
+ "model.layers.5.mlp.up_proj.weight": "model-00004-of-00038.safetensors",
552
+ "model.layers.5.post_attention_layernorm.weight": "model-00004-of-00038.safetensors",
553
+ "model.layers.5.self_attn.k_proj.bias": "model-00003-of-00038.safetensors",
554
+ "model.layers.5.self_attn.k_proj.weight": "model-00003-of-00038.safetensors",
555
+ "model.layers.5.self_attn.o_proj.weight": "model-00004-of-00038.safetensors",
556
+ "model.layers.5.self_attn.q_proj.bias": "model-00003-of-00038.safetensors",
557
+ "model.layers.5.self_attn.q_proj.weight": "model-00003-of-00038.safetensors",
558
+ "model.layers.5.self_attn.v_proj.bias": "model-00003-of-00038.safetensors",
559
+ "model.layers.5.self_attn.v_proj.weight": "model-00003-of-00038.safetensors",
560
+ "model.layers.50.input_layernorm.weight": "model-00024-of-00038.safetensors",
561
+ "model.layers.50.mlp.down_proj.weight": "model-00024-of-00038.safetensors",
562
+ "model.layers.50.mlp.gate_proj.weight": "model-00024-of-00038.safetensors",
563
+ "model.layers.50.mlp.up_proj.weight": "model-00024-of-00038.safetensors",
564
+ "model.layers.50.post_attention_layernorm.weight": "model-00024-of-00038.safetensors",
565
+ "model.layers.50.self_attn.k_proj.bias": "model-00024-of-00038.safetensors",
566
+ "model.layers.50.self_attn.k_proj.weight": "model-00024-of-00038.safetensors",
567
+ "model.layers.50.self_attn.o_proj.weight": "model-00024-of-00038.safetensors",
568
+ "model.layers.50.self_attn.q_proj.bias": "model-00024-of-00038.safetensors",
569
+ "model.layers.50.self_attn.q_proj.weight": "model-00024-of-00038.safetensors",
570
+ "model.layers.50.self_attn.v_proj.bias": "model-00024-of-00038.safetensors",
571
+ "model.layers.50.self_attn.v_proj.weight": "model-00024-of-00038.safetensors",
572
+ "model.layers.51.input_layernorm.weight": "model-00025-of-00038.safetensors",
573
+ "model.layers.51.mlp.down_proj.weight": "model-00025-of-00038.safetensors",
574
+ "model.layers.51.mlp.gate_proj.weight": "model-00025-of-00038.safetensors",
575
+ "model.layers.51.mlp.up_proj.weight": "model-00025-of-00038.safetensors",
576
+ "model.layers.51.post_attention_layernorm.weight": "model-00025-of-00038.safetensors",
577
+ "model.layers.51.self_attn.k_proj.bias": "model-00024-of-00038.safetensors",
578
+ "model.layers.51.self_attn.k_proj.weight": "model-00024-of-00038.safetensors",
579
+ "model.layers.51.self_attn.o_proj.weight": "model-00024-of-00038.safetensors",
580
+ "model.layers.51.self_attn.q_proj.bias": "model-00024-of-00038.safetensors",
581
+ "model.layers.51.self_attn.q_proj.weight": "model-00024-of-00038.safetensors",
582
+ "model.layers.51.self_attn.v_proj.bias": "model-00024-of-00038.safetensors",
583
+ "model.layers.51.self_attn.v_proj.weight": "model-00024-of-00038.safetensors",
584
+ "model.layers.52.input_layernorm.weight": "model-00025-of-00038.safetensors",
585
+ "model.layers.52.mlp.down_proj.weight": "model-00025-of-00038.safetensors",
586
+ "model.layers.52.mlp.gate_proj.weight": "model-00025-of-00038.safetensors",
587
+ "model.layers.52.mlp.up_proj.weight": "model-00025-of-00038.safetensors",
588
+ "model.layers.52.post_attention_layernorm.weight": "model-00025-of-00038.safetensors",
589
+ "model.layers.52.self_attn.k_proj.bias": "model-00025-of-00038.safetensors",
590
+ "model.layers.52.self_attn.k_proj.weight": "model-00025-of-00038.safetensors",
591
+ "model.layers.52.self_attn.o_proj.weight": "model-00025-of-00038.safetensors",
592
+ "model.layers.52.self_attn.q_proj.bias": "model-00025-of-00038.safetensors",
593
+ "model.layers.52.self_attn.q_proj.weight": "model-00025-of-00038.safetensors",
594
+ "model.layers.52.self_attn.v_proj.bias": "model-00025-of-00038.safetensors",
595
+ "model.layers.52.self_attn.v_proj.weight": "model-00025-of-00038.safetensors",
596
+ "model.layers.53.input_layernorm.weight": "model-00026-of-00038.safetensors",
597
+ "model.layers.53.mlp.down_proj.weight": "model-00026-of-00038.safetensors",
598
+ "model.layers.53.mlp.gate_proj.weight": "model-00025-of-00038.safetensors",
599
+ "model.layers.53.mlp.up_proj.weight": "model-00026-of-00038.safetensors",
600
+ "model.layers.53.post_attention_layernorm.weight": "model-00026-of-00038.safetensors",
601
+ "model.layers.53.self_attn.k_proj.bias": "model-00025-of-00038.safetensors",
602
+ "model.layers.53.self_attn.k_proj.weight": "model-00025-of-00038.safetensors",
603
+ "model.layers.53.self_attn.o_proj.weight": "model-00025-of-00038.safetensors",
604
+ "model.layers.53.self_attn.q_proj.bias": "model-00025-of-00038.safetensors",
605
+ "model.layers.53.self_attn.q_proj.weight": "model-00025-of-00038.safetensors",
606
+ "model.layers.53.self_attn.v_proj.bias": "model-00025-of-00038.safetensors",
607
+ "model.layers.53.self_attn.v_proj.weight": "model-00025-of-00038.safetensors",
608
+ "model.layers.54.input_layernorm.weight": "model-00026-of-00038.safetensors",
609
+ "model.layers.54.mlp.down_proj.weight": "model-00026-of-00038.safetensors",
610
+ "model.layers.54.mlp.gate_proj.weight": "model-00026-of-00038.safetensors",
611
+ "model.layers.54.mlp.up_proj.weight": "model-00026-of-00038.safetensors",
612
+ "model.layers.54.post_attention_layernorm.weight": "model-00026-of-00038.safetensors",
613
+ "model.layers.54.self_attn.k_proj.bias": "model-00026-of-00038.safetensors",
614
+ "model.layers.54.self_attn.k_proj.weight": "model-00026-of-00038.safetensors",
615
+ "model.layers.54.self_attn.o_proj.weight": "model-00026-of-00038.safetensors",
616
+ "model.layers.54.self_attn.q_proj.bias": "model-00026-of-00038.safetensors",
617
+ "model.layers.54.self_attn.q_proj.weight": "model-00026-of-00038.safetensors",
618
+ "model.layers.54.self_attn.v_proj.bias": "model-00026-of-00038.safetensors",
619
+ "model.layers.54.self_attn.v_proj.weight": "model-00026-of-00038.safetensors",
620
+ "model.layers.55.input_layernorm.weight": "model-00027-of-00038.safetensors",
621
+ "model.layers.55.mlp.down_proj.weight": "model-00027-of-00038.safetensors",
622
+ "model.layers.55.mlp.gate_proj.weight": "model-00026-of-00038.safetensors",
623
+ "model.layers.55.mlp.up_proj.weight": "model-00026-of-00038.safetensors",
624
+ "model.layers.55.post_attention_layernorm.weight": "model-00027-of-00038.safetensors",
625
+ "model.layers.55.self_attn.k_proj.bias": "model-00026-of-00038.safetensors",
626
+ "model.layers.55.self_attn.k_proj.weight": "model-00026-of-00038.safetensors",
627
+ "model.layers.55.self_attn.o_proj.weight": "model-00026-of-00038.safetensors",
628
+ "model.layers.55.self_attn.q_proj.bias": "model-00026-of-00038.safetensors",
629
+ "model.layers.55.self_attn.q_proj.weight": "model-00026-of-00038.safetensors",
630
+ "model.layers.55.self_attn.v_proj.bias": "model-00026-of-00038.safetensors",
631
+ "model.layers.55.self_attn.v_proj.weight": "model-00026-of-00038.safetensors",
632
+ "model.layers.56.input_layernorm.weight": "model-00027-of-00038.safetensors",
633
+ "model.layers.56.mlp.down_proj.weight": "model-00027-of-00038.safetensors",
634
+ "model.layers.56.mlp.gate_proj.weight": "model-00027-of-00038.safetensors",
635
+ "model.layers.56.mlp.up_proj.weight": "model-00027-of-00038.safetensors",
636
+ "model.layers.56.post_attention_layernorm.weight": "model-00027-of-00038.safetensors",
637
+ "model.layers.56.self_attn.k_proj.bias": "model-00027-of-00038.safetensors",
638
+ "model.layers.56.self_attn.k_proj.weight": "model-00027-of-00038.safetensors",
639
+ "model.layers.56.self_attn.o_proj.weight": "model-00027-of-00038.safetensors",
640
+ "model.layers.56.self_attn.q_proj.bias": "model-00027-of-00038.safetensors",
641
+ "model.layers.56.self_attn.q_proj.weight": "model-00027-of-00038.safetensors",
642
+ "model.layers.56.self_attn.v_proj.bias": "model-00027-of-00038.safetensors",
643
+ "model.layers.56.self_attn.v_proj.weight": "model-00027-of-00038.safetensors",
644
+ "model.layers.57.input_layernorm.weight": "model-00027-of-00038.safetensors",
645
+ "model.layers.57.mlp.down_proj.weight": "model-00027-of-00038.safetensors",
646
+ "model.layers.57.mlp.gate_proj.weight": "model-00027-of-00038.safetensors",
647
+ "model.layers.57.mlp.up_proj.weight": "model-00027-of-00038.safetensors",
648
+ "model.layers.57.post_attention_layernorm.weight": "model-00027-of-00038.safetensors",
649
+ "model.layers.57.self_attn.k_proj.bias": "model-00027-of-00038.safetensors",
650
+ "model.layers.57.self_attn.k_proj.weight": "model-00027-of-00038.safetensors",
651
+ "model.layers.57.self_attn.o_proj.weight": "model-00027-of-00038.safetensors",
652
+ "model.layers.57.self_attn.q_proj.bias": "model-00027-of-00038.safetensors",
653
+ "model.layers.57.self_attn.q_proj.weight": "model-00027-of-00038.safetensors",
654
+ "model.layers.57.self_attn.v_proj.bias": "model-00027-of-00038.safetensors",
655
+ "model.layers.57.self_attn.v_proj.weight": "model-00027-of-00038.safetensors",
656
+ "model.layers.58.input_layernorm.weight": "model-00028-of-00038.safetensors",
657
+ "model.layers.58.mlp.down_proj.weight": "model-00028-of-00038.safetensors",
658
+ "model.layers.58.mlp.gate_proj.weight": "model-00028-of-00038.safetensors",
659
+ "model.layers.58.mlp.up_proj.weight": "model-00028-of-00038.safetensors",
660
+ "model.layers.58.post_attention_layernorm.weight": "model-00028-of-00038.safetensors",
661
+ "model.layers.58.self_attn.k_proj.bias": "model-00028-of-00038.safetensors",
662
+ "model.layers.58.self_attn.k_proj.weight": "model-00028-of-00038.safetensors",
663
+ "model.layers.58.self_attn.o_proj.weight": "model-00028-of-00038.safetensors",
664
+ "model.layers.58.self_attn.q_proj.bias": "model-00028-of-00038.safetensors",
665
+ "model.layers.58.self_attn.q_proj.weight": "model-00028-of-00038.safetensors",
666
+ "model.layers.58.self_attn.v_proj.bias": "model-00028-of-00038.safetensors",
667
+ "model.layers.58.self_attn.v_proj.weight": "model-00028-of-00038.safetensors",
668
+ "model.layers.59.input_layernorm.weight": "model-00028-of-00038.safetensors",
669
+ "model.layers.59.mlp.down_proj.weight": "model-00028-of-00038.safetensors",
670
+ "model.layers.59.mlp.gate_proj.weight": "model-00028-of-00038.safetensors",
671
+ "model.layers.59.mlp.up_proj.weight": "model-00028-of-00038.safetensors",
672
+ "model.layers.59.post_attention_layernorm.weight": "model-00028-of-00038.safetensors",
673
+ "model.layers.59.self_attn.k_proj.bias": "model-00028-of-00038.safetensors",
674
+ "model.layers.59.self_attn.k_proj.weight": "model-00028-of-00038.safetensors",
675
+ "model.layers.59.self_attn.o_proj.weight": "model-00028-of-00038.safetensors",
676
+ "model.layers.59.self_attn.q_proj.bias": "model-00028-of-00038.safetensors",
677
+ "model.layers.59.self_attn.q_proj.weight": "model-00028-of-00038.safetensors",
678
+ "model.layers.59.self_attn.v_proj.bias": "model-00028-of-00038.safetensors",
679
+ "model.layers.59.self_attn.v_proj.weight": "model-00028-of-00038.safetensors",
680
+ "model.layers.6.input_layernorm.weight": "model-00004-of-00038.safetensors",
681
+ "model.layers.6.mlp.down_proj.weight": "model-00004-of-00038.safetensors",
682
+ "model.layers.6.mlp.gate_proj.weight": "model-00004-of-00038.safetensors",
683
+ "model.layers.6.mlp.up_proj.weight": "model-00004-of-00038.safetensors",
684
+ "model.layers.6.post_attention_layernorm.weight": "model-00004-of-00038.safetensors",
685
+ "model.layers.6.self_attn.k_proj.bias": "model-00004-of-00038.safetensors",
686
+ "model.layers.6.self_attn.k_proj.weight": "model-00004-of-00038.safetensors",
687
+ "model.layers.6.self_attn.o_proj.weight": "model-00004-of-00038.safetensors",
688
+ "model.layers.6.self_attn.q_proj.bias": "model-00004-of-00038.safetensors",
689
+ "model.layers.6.self_attn.q_proj.weight": "model-00004-of-00038.safetensors",
690
+ "model.layers.6.self_attn.v_proj.bias": "model-00004-of-00038.safetensors",
691
+ "model.layers.6.self_attn.v_proj.weight": "model-00004-of-00038.safetensors",
692
+ "model.layers.60.input_layernorm.weight": "model-00029-of-00038.safetensors",
693
+ "model.layers.60.mlp.down_proj.weight": "model-00029-of-00038.safetensors",
694
+ "model.layers.60.mlp.gate_proj.weight": "model-00029-of-00038.safetensors",
695
+ "model.layers.60.mlp.up_proj.weight": "model-00029-of-00038.safetensors",
696
+ "model.layers.60.post_attention_layernorm.weight": "model-00029-of-00038.safetensors",
697
+ "model.layers.60.self_attn.k_proj.bias": "model-00028-of-00038.safetensors",
698
+ "model.layers.60.self_attn.k_proj.weight": "model-00028-of-00038.safetensors",
699
+ "model.layers.60.self_attn.o_proj.weight": "model-00029-of-00038.safetensors",
700
+ "model.layers.60.self_attn.q_proj.bias": "model-00028-of-00038.safetensors",
701
+ "model.layers.60.self_attn.q_proj.weight": "model-00028-of-00038.safetensors",
702
+ "model.layers.60.self_attn.v_proj.bias": "model-00028-of-00038.safetensors",
703
+ "model.layers.60.self_attn.v_proj.weight": "model-00028-of-00038.safetensors",
704
+ "model.layers.61.input_layernorm.weight": "model-00029-of-00038.safetensors",
705
+ "model.layers.61.mlp.down_proj.weight": "model-00029-of-00038.safetensors",
706
+ "model.layers.61.mlp.gate_proj.weight": "model-00029-of-00038.safetensors",
707
+ "model.layers.61.mlp.up_proj.weight": "model-00029-of-00038.safetensors",
708
+ "model.layers.61.post_attention_layernorm.weight": "model-00029-of-00038.safetensors",
709
+ "model.layers.61.self_attn.k_proj.bias": "model-00029-of-00038.safetensors",
710
+ "model.layers.61.self_attn.k_proj.weight": "model-00029-of-00038.safetensors",
711
+ "model.layers.61.self_attn.o_proj.weight": "model-00029-of-00038.safetensors",
712
+ "model.layers.61.self_attn.q_proj.bias": "model-00029-of-00038.safetensors",
713
+ "model.layers.61.self_attn.q_proj.weight": "model-00029-of-00038.safetensors",
714
+ "model.layers.61.self_attn.v_proj.bias": "model-00029-of-00038.safetensors",
715
+ "model.layers.61.self_attn.v_proj.weight": "model-00029-of-00038.safetensors",
716
+ "model.layers.62.input_layernorm.weight": "model-00030-of-00038.safetensors",
717
+ "model.layers.62.mlp.down_proj.weight": "model-00030-of-00038.safetensors",
718
+ "model.layers.62.mlp.gate_proj.weight": "model-00030-of-00038.safetensors",
719
+ "model.layers.62.mlp.up_proj.weight": "model-00030-of-00038.safetensors",
720
+ "model.layers.62.post_attention_layernorm.weight": "model-00030-of-00038.safetensors",
721
+ "model.layers.62.self_attn.k_proj.bias": "model-00029-of-00038.safetensors",
722
+ "model.layers.62.self_attn.k_proj.weight": "model-00029-of-00038.safetensors",
723
+ "model.layers.62.self_attn.o_proj.weight": "model-00029-of-00038.safetensors",
724
+ "model.layers.62.self_attn.q_proj.bias": "model-00029-of-00038.safetensors",
725
+ "model.layers.62.self_attn.q_proj.weight": "model-00029-of-00038.safetensors",
726
+ "model.layers.62.self_attn.v_proj.bias": "model-00029-of-00038.safetensors",
727
+ "model.layers.62.self_attn.v_proj.weight": "model-00029-of-00038.safetensors",
728
+ "model.layers.63.input_layernorm.weight": "model-00030-of-00038.safetensors",
729
+ "model.layers.63.mlp.down_proj.weight": "model-00030-of-00038.safetensors",
730
+ "model.layers.63.mlp.gate_proj.weight": "model-00030-of-00038.safetensors",
731
+ "model.layers.63.mlp.up_proj.weight": "model-00030-of-00038.safetensors",
732
+ "model.layers.63.post_attention_layernorm.weight": "model-00030-of-00038.safetensors",
733
+ "model.layers.63.self_attn.k_proj.bias": "model-00030-of-00038.safetensors",
734
+ "model.layers.63.self_attn.k_proj.weight": "model-00030-of-00038.safetensors",
735
+ "model.layers.63.self_attn.o_proj.weight": "model-00030-of-00038.safetensors",
736
+ "model.layers.63.self_attn.q_proj.bias": "model-00030-of-00038.safetensors",
737
+ "model.layers.63.self_attn.q_proj.weight": "model-00030-of-00038.safetensors",
738
+ "model.layers.63.self_attn.v_proj.bias": "model-00030-of-00038.safetensors",
739
+ "model.layers.63.self_attn.v_proj.weight": "model-00030-of-00038.safetensors",
740
+ "model.layers.64.input_layernorm.weight": "model-00031-of-00038.safetensors",
741
+ "model.layers.64.mlp.down_proj.weight": "model-00031-of-00038.safetensors",
742
+ "model.layers.64.mlp.gate_proj.weight": "model-00030-of-00038.safetensors",
743
+ "model.layers.64.mlp.up_proj.weight": "model-00031-of-00038.safetensors",
744
+ "model.layers.64.post_attention_layernorm.weight": "model-00031-of-00038.safetensors",
745
+ "model.layers.64.self_attn.k_proj.bias": "model-00030-of-00038.safetensors",
746
+ "model.layers.64.self_attn.k_proj.weight": "model-00030-of-00038.safetensors",
747
+ "model.layers.64.self_attn.o_proj.weight": "model-00030-of-00038.safetensors",
748
+ "model.layers.64.self_attn.q_proj.bias": "model-00030-of-00038.safetensors",
749
+ "model.layers.64.self_attn.q_proj.weight": "model-00030-of-00038.safetensors",
750
+ "model.layers.64.self_attn.v_proj.bias": "model-00030-of-00038.safetensors",
751
+ "model.layers.64.self_attn.v_proj.weight": "model-00030-of-00038.safetensors",
752
+ "model.layers.65.input_layernorm.weight": "model-00031-of-00038.safetensors",
753
+ "model.layers.65.mlp.down_proj.weight": "model-00031-of-00038.safetensors",
754
+ "model.layers.65.mlp.gate_proj.weight": "model-00031-of-00038.safetensors",
755
+ "model.layers.65.mlp.up_proj.weight": "model-00031-of-00038.safetensors",
756
+ "model.layers.65.post_attention_layernorm.weight": "model-00031-of-00038.safetensors",
757
+ "model.layers.65.self_attn.k_proj.bias": "model-00031-of-00038.safetensors",
758
+ "model.layers.65.self_attn.k_proj.weight": "model-00031-of-00038.safetensors",
759
+ "model.layers.65.self_attn.o_proj.weight": "model-00031-of-00038.safetensors",
760
+ "model.layers.65.self_attn.q_proj.bias": "model-00031-of-00038.safetensors",
761
+ "model.layers.65.self_attn.q_proj.weight": "model-00031-of-00038.safetensors",
762
+ "model.layers.65.self_attn.v_proj.bias": "model-00031-of-00038.safetensors",
763
+ "model.layers.65.self_attn.v_proj.weight": "model-00031-of-00038.safetensors",
764
+ "model.layers.66.input_layernorm.weight": "model-00032-of-00038.safetensors",
765
+ "model.layers.66.mlp.down_proj.weight": "model-00032-of-00038.safetensors",
766
+ "model.layers.66.mlp.gate_proj.weight": "model-00031-of-00038.safetensors",
767
+ "model.layers.66.mlp.up_proj.weight": "model-00031-of-00038.safetensors",
768
+ "model.layers.66.post_attention_layernorm.weight": "model-00032-of-00038.safetensors",
769
+ "model.layers.66.self_attn.k_proj.bias": "model-00031-of-00038.safetensors",
770
+ "model.layers.66.self_attn.k_proj.weight": "model-00031-of-00038.safetensors",
771
+ "model.layers.66.self_attn.o_proj.weight": "model-00031-of-00038.safetensors",
772
+ "model.layers.66.self_attn.q_proj.bias": "model-00031-of-00038.safetensors",
773
+ "model.layers.66.self_attn.q_proj.weight": "model-00031-of-00038.safetensors",
774
+ "model.layers.66.self_attn.v_proj.bias": "model-00031-of-00038.safetensors",
775
+ "model.layers.66.self_attn.v_proj.weight": "model-00031-of-00038.safetensors",
776
+ "model.layers.67.input_layernorm.weight": "model-00032-of-00038.safetensors",
777
+ "model.layers.67.mlp.down_proj.weight": "model-00032-of-00038.safetensors",
778
+ "model.layers.67.mlp.gate_proj.weight": "model-00032-of-00038.safetensors",
779
+ "model.layers.67.mlp.up_proj.weight": "model-00032-of-00038.safetensors",
780
+ "model.layers.67.post_attention_layernorm.weight": "model-00032-of-00038.safetensors",
781
+ "model.layers.67.self_attn.k_proj.bias": "model-00032-of-00038.safetensors",
782
+ "model.layers.67.self_attn.k_proj.weight": "model-00032-of-00038.safetensors",
783
+ "model.layers.67.self_attn.o_proj.weight": "model-00032-of-00038.safetensors",
784
+ "model.layers.67.self_attn.q_proj.bias": "model-00032-of-00038.safetensors",
785
+ "model.layers.67.self_attn.q_proj.weight": "model-00032-of-00038.safetensors",
786
+ "model.layers.67.self_attn.v_proj.bias": "model-00032-of-00038.safetensors",
787
+ "model.layers.67.self_attn.v_proj.weight": "model-00032-of-00038.safetensors",
788
+ "model.layers.68.input_layernorm.weight": "model-00032-of-00038.safetensors",
789
+ "model.layers.68.mlp.down_proj.weight": "model-00032-of-00038.safetensors",
790
+ "model.layers.68.mlp.gate_proj.weight": "model-00032-of-00038.safetensors",
791
+ "model.layers.68.mlp.up_proj.weight": "model-00032-of-00038.safetensors",
792
+ "model.layers.68.post_attention_layernorm.weight": "model-00032-of-00038.safetensors",
793
+ "model.layers.68.self_attn.k_proj.bias": "model-00032-of-00038.safetensors",
794
+ "model.layers.68.self_attn.k_proj.weight": "model-00032-of-00038.safetensors",
795
+ "model.layers.68.self_attn.o_proj.weight": "model-00032-of-00038.safetensors",
796
+ "model.layers.68.self_attn.q_proj.bias": "model-00032-of-00038.safetensors",
797
+ "model.layers.68.self_attn.q_proj.weight": "model-00032-of-00038.safetensors",
798
+ "model.layers.68.self_attn.v_proj.bias": "model-00032-of-00038.safetensors",
799
+ "model.layers.68.self_attn.v_proj.weight": "model-00032-of-00038.safetensors",
800
+ "model.layers.69.input_layernorm.weight": "model-00033-of-00038.safetensors",
801
+ "model.layers.69.mlp.down_proj.weight": "model-00033-of-00038.safetensors",
802
+ "model.layers.69.mlp.gate_proj.weight": "model-00033-of-00038.safetensors",
803
+ "model.layers.69.mlp.up_proj.weight": "model-00033-of-00038.safetensors",
804
+ "model.layers.69.post_attention_layernorm.weight": "model-00033-of-00038.safetensors",
805
+ "model.layers.69.self_attn.k_proj.bias": "model-00033-of-00038.safetensors",
806
+ "model.layers.69.self_attn.k_proj.weight": "model-00033-of-00038.safetensors",
807
+ "model.layers.69.self_attn.o_proj.weight": "model-00033-of-00038.safetensors",
808
+ "model.layers.69.self_attn.q_proj.bias": "model-00033-of-00038.safetensors",
809
+ "model.layers.69.self_attn.q_proj.weight": "model-00033-of-00038.safetensors",
810
+ "model.layers.69.self_attn.v_proj.bias": "model-00033-of-00038.safetensors",
811
+ "model.layers.69.self_attn.v_proj.weight": "model-00033-of-00038.safetensors",
812
+ "model.layers.7.input_layernorm.weight": "model-00005-of-00038.safetensors",
813
+ "model.layers.7.mlp.down_proj.weight": "model-00005-of-00038.safetensors",
814
+ "model.layers.7.mlp.gate_proj.weight": "model-00005-of-00038.safetensors",
815
+ "model.layers.7.mlp.up_proj.weight": "model-00005-of-00038.safetensors",
816
+ "model.layers.7.post_attention_layernorm.weight": "model-00005-of-00038.safetensors",
817
+ "model.layers.7.self_attn.k_proj.bias": "model-00004-of-00038.safetensors",
818
+ "model.layers.7.self_attn.k_proj.weight": "model-00004-of-00038.safetensors",
819
+ "model.layers.7.self_attn.o_proj.weight": "model-00004-of-00038.safetensors",
820
+ "model.layers.7.self_attn.q_proj.bias": "model-00004-of-00038.safetensors",
821
+ "model.layers.7.self_attn.q_proj.weight": "model-00004-of-00038.safetensors",
822
+ "model.layers.7.self_attn.v_proj.bias": "model-00004-of-00038.safetensors",
823
+ "model.layers.7.self_attn.v_proj.weight": "model-00004-of-00038.safetensors",
824
+ "model.layers.70.input_layernorm.weight": "model-00033-of-00038.safetensors",
825
+ "model.layers.70.mlp.down_proj.weight": "model-00033-of-00038.safetensors",
826
+ "model.layers.70.mlp.gate_proj.weight": "model-00033-of-00038.safetensors",
827
+ "model.layers.70.mlp.up_proj.weight": "model-00033-of-00038.safetensors",
828
+ "model.layers.70.post_attention_layernorm.weight": "model-00033-of-00038.safetensors",
829
+ "model.layers.70.self_attn.k_proj.bias": "model-00033-of-00038.safetensors",
830
+ "model.layers.70.self_attn.k_proj.weight": "model-00033-of-00038.safetensors",
831
+ "model.layers.70.self_attn.o_proj.weight": "model-00033-of-00038.safetensors",
832
+ "model.layers.70.self_attn.q_proj.bias": "model-00033-of-00038.safetensors",
833
+ "model.layers.70.self_attn.q_proj.weight": "model-00033-of-00038.safetensors",
834
+ "model.layers.70.self_attn.v_proj.bias": "model-00033-of-00038.safetensors",
835
+ "model.layers.70.self_attn.v_proj.weight": "model-00033-of-00038.safetensors",
836
+ "model.layers.71.input_layernorm.weight": "model-00034-of-00038.safetensors",
837
+ "model.layers.71.mlp.down_proj.weight": "model-00034-of-00038.safetensors",
838
+ "model.layers.71.mlp.gate_proj.weight": "model-00034-of-00038.safetensors",
839
+ "model.layers.71.mlp.up_proj.weight": "model-00034-of-00038.safetensors",
840
+ "model.layers.71.post_attention_layernorm.weight": "model-00034-of-00038.safetensors",
841
+ "model.layers.71.self_attn.k_proj.bias": "model-00033-of-00038.safetensors",
842
+ "model.layers.71.self_attn.k_proj.weight": "model-00033-of-00038.safetensors",
843
+ "model.layers.71.self_attn.o_proj.weight": "model-00034-of-00038.safetensors",
844
+ "model.layers.71.self_attn.q_proj.bias": "model-00033-of-00038.safetensors",
845
+ "model.layers.71.self_attn.q_proj.weight": "model-00033-of-00038.safetensors",
846
+ "model.layers.71.self_attn.v_proj.bias": "model-00033-of-00038.safetensors",
847
+ "model.layers.71.self_attn.v_proj.weight": "model-00033-of-00038.safetensors",
848
+ "model.layers.72.input_layernorm.weight": "model-00034-of-00038.safetensors",
849
+ "model.layers.72.mlp.down_proj.weight": "model-00034-of-00038.safetensors",
850
+ "model.layers.72.mlp.gate_proj.weight": "model-00034-of-00038.safetensors",
851
+ "model.layers.72.mlp.up_proj.weight": "model-00034-of-00038.safetensors",
852
+ "model.layers.72.post_attention_layernorm.weight": "model-00034-of-00038.safetensors",
853
+ "model.layers.72.self_attn.k_proj.bias": "model-00034-of-00038.safetensors",
854
+ "model.layers.72.self_attn.k_proj.weight": "model-00034-of-00038.safetensors",
855
+ "model.layers.72.self_attn.o_proj.weight": "model-00034-of-00038.safetensors",
856
+ "model.layers.72.self_attn.q_proj.bias": "model-00034-of-00038.safetensors",
857
+ "model.layers.72.self_attn.q_proj.weight": "model-00034-of-00038.safetensors",
858
+ "model.layers.72.self_attn.v_proj.bias": "model-00034-of-00038.safetensors",
859
+ "model.layers.72.self_attn.v_proj.weight": "model-00034-of-00038.safetensors",
860
+ "model.layers.73.input_layernorm.weight": "model-00035-of-00038.safetensors",
861
+ "model.layers.73.mlp.down_proj.weight": "model-00035-of-00038.safetensors",
862
+ "model.layers.73.mlp.gate_proj.weight": "model-00035-of-00038.safetensors",
863
+ "model.layers.73.mlp.up_proj.weight": "model-00035-of-00038.safetensors",
864
+ "model.layers.73.post_attention_layernorm.weight": "model-00035-of-00038.safetensors",
865
+ "model.layers.73.self_attn.k_proj.bias": "model-00034-of-00038.safetensors",
866
+ "model.layers.73.self_attn.k_proj.weight": "model-00034-of-00038.safetensors",
867
+ "model.layers.73.self_attn.o_proj.weight": "model-00034-of-00038.safetensors",
868
+ "model.layers.73.self_attn.q_proj.bias": "model-00034-of-00038.safetensors",
869
+ "model.layers.73.self_attn.q_proj.weight": "model-00034-of-00038.safetensors",
870
+ "model.layers.73.self_attn.v_proj.bias": "model-00034-of-00038.safetensors",
871
+ "model.layers.73.self_attn.v_proj.weight": "model-00034-of-00038.safetensors",
872
+ "model.layers.74.input_layernorm.weight": "model-00035-of-00038.safetensors",
873
+ "model.layers.74.mlp.down_proj.weight": "model-00035-of-00038.safetensors",
874
+ "model.layers.74.mlp.gate_proj.weight": "model-00035-of-00038.safetensors",
875
+ "model.layers.74.mlp.up_proj.weight": "model-00035-of-00038.safetensors",
876
+ "model.layers.74.post_attention_layernorm.weight": "model-00035-of-00038.safetensors",
877
+ "model.layers.74.self_attn.k_proj.bias": "model-00035-of-00038.safetensors",
878
+ "model.layers.74.self_attn.k_proj.weight": "model-00035-of-00038.safetensors",
879
+ "model.layers.74.self_attn.o_proj.weight": "model-00035-of-00038.safetensors",
880
+ "model.layers.74.self_attn.q_proj.bias": "model-00035-of-00038.safetensors",
881
+ "model.layers.74.self_attn.q_proj.weight": "model-00035-of-00038.safetensors",
882
+ "model.layers.74.self_attn.v_proj.bias": "model-00035-of-00038.safetensors",
883
+ "model.layers.74.self_attn.v_proj.weight": "model-00035-of-00038.safetensors",
884
+ "model.layers.75.input_layernorm.weight": "model-00036-of-00038.safetensors",
885
+ "model.layers.75.mlp.down_proj.weight": "model-00036-of-00038.safetensors",
886
+ "model.layers.75.mlp.gate_proj.weight": "model-00035-of-00038.safetensors",
887
+ "model.layers.75.mlp.up_proj.weight": "model-00036-of-00038.safetensors",
888
+ "model.layers.75.post_attention_layernorm.weight": "model-00036-of-00038.safetensors",
889
+ "model.layers.75.self_attn.k_proj.bias": "model-00035-of-00038.safetensors",
890
+ "model.layers.75.self_attn.k_proj.weight": "model-00035-of-00038.safetensors",
891
+ "model.layers.75.self_attn.o_proj.weight": "model-00035-of-00038.safetensors",
892
+ "model.layers.75.self_attn.q_proj.bias": "model-00035-of-00038.safetensors",
893
+ "model.layers.75.self_attn.q_proj.weight": "model-00035-of-00038.safetensors",
894
+ "model.layers.75.self_attn.v_proj.bias": "model-00035-of-00038.safetensors",
895
+ "model.layers.75.self_attn.v_proj.weight": "model-00035-of-00038.safetensors",
896
+ "model.layers.76.input_layernorm.weight": "model-00036-of-00038.safetensors",
897
+ "model.layers.76.mlp.down_proj.weight": "model-00036-of-00038.safetensors",
898
+ "model.layers.76.mlp.gate_proj.weight": "model-00036-of-00038.safetensors",
899
+ "model.layers.76.mlp.up_proj.weight": "model-00036-of-00038.safetensors",
900
+ "model.layers.76.post_attention_layernorm.weight": "model-00036-of-00038.safetensors",
901
+ "model.layers.76.self_attn.k_proj.bias": "model-00036-of-00038.safetensors",
902
+ "model.layers.76.self_attn.k_proj.weight": "model-00036-of-00038.safetensors",
903
+ "model.layers.76.self_attn.o_proj.weight": "model-00036-of-00038.safetensors",
904
+ "model.layers.76.self_attn.q_proj.bias": "model-00036-of-00038.safetensors",
905
+ "model.layers.76.self_attn.q_proj.weight": "model-00036-of-00038.safetensors",
906
+ "model.layers.76.self_attn.v_proj.bias": "model-00036-of-00038.safetensors",
907
+ "model.layers.76.self_attn.v_proj.weight": "model-00036-of-00038.safetensors",
908
+ "model.layers.77.input_layernorm.weight": "model-00037-of-00038.safetensors",
909
+ "model.layers.77.mlp.down_proj.weight": "model-00037-of-00038.safetensors",
910
+ "model.layers.77.mlp.gate_proj.weight": "model-00036-of-00038.safetensors",
911
+ "model.layers.77.mlp.up_proj.weight": "model-00036-of-00038.safetensors",
912
+ "model.layers.77.post_attention_layernorm.weight": "model-00037-of-00038.safetensors",
913
+ "model.layers.77.self_attn.k_proj.bias": "model-00036-of-00038.safetensors",
914
+ "model.layers.77.self_attn.k_proj.weight": "model-00036-of-00038.safetensors",
915
+ "model.layers.77.self_attn.o_proj.weight": "model-00036-of-00038.safetensors",
916
+ "model.layers.77.self_attn.q_proj.bias": "model-00036-of-00038.safetensors",
917
+ "model.layers.77.self_attn.q_proj.weight": "model-00036-of-00038.safetensors",
918
+ "model.layers.77.self_attn.v_proj.bias": "model-00036-of-00038.safetensors",
919
+ "model.layers.77.self_attn.v_proj.weight": "model-00036-of-00038.safetensors",
920
+ "model.layers.78.input_layernorm.weight": "model-00037-of-00038.safetensors",
921
+ "model.layers.78.mlp.down_proj.weight": "model-00037-of-00038.safetensors",
922
+ "model.layers.78.mlp.gate_proj.weight": "model-00037-of-00038.safetensors",
923
+ "model.layers.78.mlp.up_proj.weight": "model-00037-of-00038.safetensors",
924
+ "model.layers.78.post_attention_layernorm.weight": "model-00037-of-00038.safetensors",
925
+ "model.layers.78.self_attn.k_proj.bias": "model-00037-of-00038.safetensors",
926
+ "model.layers.78.self_attn.k_proj.weight": "model-00037-of-00038.safetensors",
927
+ "model.layers.78.self_attn.o_proj.weight": "model-00037-of-00038.safetensors",
928
+ "model.layers.78.self_attn.q_proj.bias": "model-00037-of-00038.safetensors",
929
+ "model.layers.78.self_attn.q_proj.weight": "model-00037-of-00038.safetensors",
930
+ "model.layers.78.self_attn.v_proj.bias": "model-00037-of-00038.safetensors",
931
+ "model.layers.78.self_attn.v_proj.weight": "model-00037-of-00038.safetensors",
932
+ "model.layers.79.input_layernorm.weight": "model-00037-of-00038.safetensors",
933
+ "model.layers.79.mlp.down_proj.weight": "model-00037-of-00038.safetensors",
934
+ "model.layers.79.mlp.gate_proj.weight": "model-00037-of-00038.safetensors",
935
+ "model.layers.79.mlp.up_proj.weight": "model-00037-of-00038.safetensors",
936
+ "model.layers.79.post_attention_layernorm.weight": "model-00037-of-00038.safetensors",
937
+ "model.layers.79.self_attn.k_proj.bias": "model-00037-of-00038.safetensors",
938
+ "model.layers.79.self_attn.k_proj.weight": "model-00037-of-00038.safetensors",
939
+ "model.layers.79.self_attn.o_proj.weight": "model-00037-of-00038.safetensors",
940
+ "model.layers.79.self_attn.q_proj.bias": "model-00037-of-00038.safetensors",
941
+ "model.layers.79.self_attn.q_proj.weight": "model-00037-of-00038.safetensors",
942
+ "model.layers.79.self_attn.v_proj.bias": "model-00037-of-00038.safetensors",
943
+ "model.layers.79.self_attn.v_proj.weight": "model-00037-of-00038.safetensors",
944
+ "model.layers.8.input_layernorm.weight": "model-00005-of-00038.safetensors",
945
+ "model.layers.8.mlp.down_proj.weight": "model-00005-of-00038.safetensors",
946
+ "model.layers.8.mlp.gate_proj.weight": "model-00005-of-00038.safetensors",
947
+ "model.layers.8.mlp.up_proj.weight": "model-00005-of-00038.safetensors",
948
+ "model.layers.8.post_attention_layernorm.weight": "model-00005-of-00038.safetensors",
949
+ "model.layers.8.self_attn.k_proj.bias": "model-00005-of-00038.safetensors",
950
+ "model.layers.8.self_attn.k_proj.weight": "model-00005-of-00038.safetensors",
951
+ "model.layers.8.self_attn.o_proj.weight": "model-00005-of-00038.safetensors",
952
+ "model.layers.8.self_attn.q_proj.bias": "model-00005-of-00038.safetensors",
953
+ "model.layers.8.self_attn.q_proj.weight": "model-00005-of-00038.safetensors",
954
+ "model.layers.8.self_attn.v_proj.bias": "model-00005-of-00038.safetensors",
955
+ "model.layers.8.self_attn.v_proj.weight": "model-00005-of-00038.safetensors",
956
+ "model.layers.9.input_layernorm.weight": "model-00006-of-00038.safetensors",
957
+ "model.layers.9.mlp.down_proj.weight": "model-00006-of-00038.safetensors",
958
+ "model.layers.9.mlp.gate_proj.weight": "model-00005-of-00038.safetensors",
959
+ "model.layers.9.mlp.up_proj.weight": "model-00006-of-00038.safetensors",
960
+ "model.layers.9.post_attention_layernorm.weight": "model-00006-of-00038.safetensors",
961
+ "model.layers.9.self_attn.k_proj.bias": "model-00005-of-00038.safetensors",
962
+ "model.layers.9.self_attn.k_proj.weight": "model-00005-of-00038.safetensors",
963
+ "model.layers.9.self_attn.o_proj.weight": "model-00005-of-00038.safetensors",
964
+ "model.layers.9.self_attn.q_proj.bias": "model-00005-of-00038.safetensors",
965
+ "model.layers.9.self_attn.q_proj.weight": "model-00005-of-00038.safetensors",
966
+ "model.layers.9.self_attn.v_proj.bias": "model-00005-of-00038.safetensors",
967
+ "model.layers.9.self_attn.v_proj.weight": "model-00005-of-00038.safetensors",
968
+ "model.norm.weight": "model-00037-of-00038.safetensors"
969
+ }
970
+ }
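The weight map that closes above is what lets a loader pull one tensor without touching the other 37 shards: every parameter name resolves to exactly one `model-XXXXX-of-00038.safetensors` file. Below is a minimal sketch of that lookup, assuming the repository has already been downloaded to a local `checkpoint/` directory (the path is an assumption for illustration, and the `safetensors` package must be installed):

```python
import json
from safetensors import safe_open

CKPT = "checkpoint"  # assumed local download location of this repository

# The index maps every parameter name to the shard file that stores it.
with open(f"{CKPT}/model.safetensors.index.json") as f:
    weight_map = json.load(f)["weight_map"]

name = "model.layers.31.self_attn.q_proj.weight"
shard = weight_map[name]  # -> "model-00015-of-00038.safetensors" per the map above

# Open only that shard and read the single tensor lazily.
with safe_open(f"{CKPT}/{shard}", framework="pt") as f:
    tensor = f.get_tensor(name)

print(name, tuple(tensor.shape), "from", shard)
```

`transformers.AutoModelForCausalLM.from_pretrained` performs this same shard resolution internally; the sketch only makes the indirection visible.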
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,40 @@
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "151643": {
5
+ "content": "<|endoftext|>",
6
+ "lstrip": false,
7
+ "normalized": false,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ },
12
+ "151644": {
13
+ "content": "<|im_start|>",
14
+ "lstrip": false,
15
+ "normalized": false,
16
+ "rstrip": false,
17
+ "single_word": false,
18
+ "special": true
19
+ },
20
+ "151645": {
21
+ "content": "<|im_end|>",
22
+ "lstrip": false,
23
+ "normalized": false,
24
+ "rstrip": false,
25
+ "single_word": false,
26
+ "special": true
27
+ }
28
+ },
29
+ "additional_special_tokens": ["<|im_start|>", "<|im_end|>"],
30
+ "bos_token": null,
31
+ "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
32
+ "clean_up_tokenization_spaces": false,
33
+ "eos_token": "<|endoftext|>",
34
+ "errors": "replace",
35
+ "model_max_length": 32768,
36
+ "pad_token": "<|endoftext|>",
37
+ "split_special_tokens": false,
38
+ "tokenizer_class": "Qwen2Tokenizer",
39
+ "unk_token": null
40
+ }
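The `chat_template` above is the ChatML convention: each message is wrapped as `<|im_start|>role\ncontent<|im_end|>`, and the three tokens registered in `added_tokens_decoder` (ids 151643-151645) are marked special so they survive tokenization as single ids. A short sketch of rendering the template through `transformers`, using the upstream model id this repo duplicates:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-72B-Chat")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is sharded checkpointing?"},
]

# apply_chat_template renders the Jinja template from tokenizer_config.json;
# add_generation_prompt=True appends the trailing "<|im_start|>assistant\n".
text = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(text)
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# What is sharded checkpointing?<|im_end|>
# <|im_start|>assistant
```

Note that `eos_token` in this file is `<|endoftext|>`; the repository's generation_config.json is what typically determines which token ids actually stop generation.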
vocab.json ADDED
The diff for this file is too large to render. See raw diff
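Although the vocab.json diff is too large to render, its role is simple: together with the merges.txt shipped alongside it, it defines the byte-level BPE that tokenizer.json also serializes in fast-tokenizer form. A minimal sketch of rebuilding the slow tokenizer from just those two files, assuming both sit in the working directory:

```python
from transformers import Qwen2Tokenizer

# vocab.json: token -> id table; merges.txt: ranked BPE merge rules.
tok = Qwen2Tokenizer(
    vocab_file="vocab.json",
    merges_file="merges.txt",
    errors="replace",  # matches the "errors" field in tokenizer_config.json
)
print(tok.tokenize("hello world"))
```

In normal use `AutoTokenizer.from_pretrained` prefers the fast tokenizer.json; the slow path above is mainly useful for inspecting the raw BPE data.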