Upload folder using huggingface_hub
- README.md +554 -3
- README_CN.md +564 -0
- config.json +39 -0
- configuration_zhinao.py +92 -0
- generation_config.json +14 -0
- generation_utils.py +187 -0
- model-00001-of-00004.safetensors +3 -0
- model-00002-of-00004.safetensors +3 -0
- model-00003-of-00004.safetensors +3 -0
- model-00004-of-00004.safetensors +3 -0
- model.safetensors.index.json +266 -0
- modeling_zhinao.py +1094 -0
- special_tokens_map.json +3 -0
- tokenization_zhinao.py +257 -0
- tokenizer_config.json +19 -0
- vocab/360.tiktoken +0 -0
README.md
CHANGED
@@ -1,3 +1,554 @@
- ---
- license: apache-2.0
- ---
---
license: apache-2.0
language:
- zh
- en
library_name: transformers
tags:
- qihoo360
- 奇虎360
- zhinao
- 360Zhinao
- pretrain
---

<p align="left">
    <a href="./README_CN.md">中文</a> | &nbsp;English&nbsp;
</p>
<br>

<div align="center">
<h1>
  360Zhinao2 (360智脑)
</h1>
</div>
<div align="center">
🤗 <a href="https://huggingface.co/qihoo360">HuggingFace</a>&nbsp;&nbsp; | &nbsp;&nbsp;
🤖 <a href="https://www.modelscope.cn/profile/qihoo360">ModelScope</a>&nbsp;&nbsp; | &nbsp;&nbsp;
💬 <a href="./assets/WeChat.png">WeChat (微信)</a>
</div>
<br>
<p align="center">
 Feel free to visit 360Zhinao's official website <a href="https://ai.360.com">https://ai.360.com</a> for more experience.
</p>

<br>

# Introduction
🎉🎉🎉 We released the 360Zhinao2 model series:
- **360Zhinao2-7B-Base**
- **360Zhinao2-7B-Chat-4K**
- **360Zhinao2-7B-Chat-32K**
- **360Zhinao2-7B-Chat-360K**

Notable features of our 360Zhinao models are:

- **Base Model:** We use the popular two-stage training method. In the first stage we train on 10T tokens with a cosine learning rate schedule; in the second stage we increase the proportion of high-quality data and train on a further 100B tokens, decaying the learning rate directly to 0 (see the illustrative schedule sketch below). The total training data for 360Zhinao2-7B amounts to 10.1T tokens.
- **Chat Models:** Powerful chat capabilities with three context lengths of 4K, 32K and 360K.
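The two-stage schedule described above can be pictured with a small helper function. This is only an illustrative sketch: the token budgets (10T + 100B) come from this card, but the peak learning rate, the cosine floor, and the exact shape of the final decay are not disclosed, so those values and the linear stage-2 decay below are assumptions.

```python
import math

def zhinao2_lr(tokens_seen: float,
               peak_lr: float = 3e-4,         # assumed placeholder, not disclosed
               floor_lr: float = 3e-5,        # assumed stage-1 cosine floor, not disclosed
               stage1_tokens: float = 10e12,  # 10T tokens, from this card
               stage2_tokens: float = 100e9   # 100B high-quality tokens, from this card
               ) -> float:
    """Illustrative two-stage schedule: cosine in stage 1, then decay directly to 0."""
    if tokens_seen <= stage1_tokens:
        progress = tokens_seen / stage1_tokens
        return floor_lr + 0.5 * (peak_lr - floor_lr) * (1.0 + math.cos(math.pi * progress))
    progress = min((tokens_seen - stage1_tokens) / stage2_tokens, 1.0)
    return floor_lr * (1.0 - progress)  # assumed linear decay to 0 in stage 2

print(zhinao2_lr(0.0), zhinao2_lr(10e12), zhinao2_lr(10.1e12))  # peak, floor, 0.0
```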
<br>

# News and Updates
- [2024.11.18] 🔥🔥🔥 We released 360Zhinao2-7B, providing access to both the Base model and Chat models with context lengths of 4K, 32K and 360K.
- [2024.05.23] We released two models, 360Zhinao-search and 360Zhinao-1.8B-Reranking, which ranked first in the Retrieval and Reranking tasks of the [C-MTEB Leaderboard](https://huggingface.co/spaces/mteb/leaderboard), respectively.
- [2024.05.20] We extended llama3 and released **llama3-8B-360Zhinao-360k-Instruct**<a href="https://huggingface.co/qihoo360/llama3-8B-360Zhinao-360k-Instruct">🤗</a>
- [2024.04.12] We released **360Zhinao-7B** v1.0, including the base model and three chat models with context lengths 4K, 32K and 360K.
The technical report is on [arXiv](https://arxiv.org/abs/2405.13386).

<br>

# Table of contents
- [Download URL](#Download-URL)
- [Model Evaluation](#Model-Evaluation)
- [Quickstart](#Quickstart)
- [Model Inference](#Model-Inference)
- [Model Finetune](#Model-Finetune)
- [License](#License)

<br>

# Download URL

| Size | Model | BF16 | Int4 |
|-|-|-|-|
| 7B | 360Zhinao2-7B-Base | <a href="https://www.modelscope.cn/models/qihoo360/360Zhinao2-7B-Base/summary">🤖</a> <a href="https://huggingface.co/qihoo360/360Zhinao2-7B-Base">🤗</a> | |
| 7B | 360Zhinao2-7B-Chat-4K | <a href="https://www.modelscope.cn/models/qihoo360/360Zhinao2-7B-Chat-4K/summary">🤖</a> <a href="https://huggingface.co/qihoo360/360Zhinao2-7B-Chat-4K">🤗</a> | <a href="https://www.modelscope.cn/models/qihoo360/360Zhinao2-7B-Chat-4K-Int4/summary">🤖</a> <a href="https://huggingface.co/qihoo360/360Zhinao2-7B-Chat-4K-Int4">🤗</a> |
| 7B | 360Zhinao2-7B-Chat-32K | <a href="https://www.modelscope.cn/models/qihoo360/360Zhinao2-7B-Chat-32K/summary">🤖</a> <a href="https://huggingface.co/qihoo360/360Zhinao2-7B-Chat-32K">🤗</a> | <a href="https://www.modelscope.cn/models/qihoo360/360Zhinao2-7B-Chat-32K-Int4/summary">🤖</a> <a href="https://huggingface.co/qihoo360/360Zhinao2-7B-Chat-32K-Int4">🤗</a> |
| 7B | 360Zhinao2-7B-Chat-360K | <a href="https://www.modelscope.cn/models/qihoo360/360Zhinao2-7B-Chat-360K/summary">🤖</a> <a href="https://huggingface.co/qihoo360/360Zhinao2-7B-Chat-360K">🤗</a> | <a href="https://www.modelscope.cn/models/qihoo360/360Zhinao2-7B-Chat-360K-Int4/summary">🤖</a> <a href="https://huggingface.co/qihoo360/360Zhinao2-7B-Chat-360K-Int4">🤗</a> |

<br>

# Model Evaluation
## Base Model
We used the open-source tool OpenCompass to evaluate the model and compared it with open-source models under 10B released in the past six months. 360Zhinao2-7B is competitive: it performs well on Chinese benchmarks such as CEval, C3 and LCSTS and ranks first on the average score across the Chinese benchmarks, and it also ranks first on MATH, a challenging competition-math dataset. **The 360Zhinao2-7B model has advantages on Chinese benchmarks and on challenging competition math.**

| Type | Datasets | Language | glm4-9b | Qwen2.5-7B | internlm2.5-7b | Yi1.5-9B | gemma2-9b | Llama3.1-8B | 360Zhinao2-7B |
|-|-|-|-|-|-|-|-|-|-|
| Exam | ceval | zh | 75.83 | 81.41 | 77.71 | 73.51 | 56.36 | 51.67 | **83.04** |
| Exam | mmlu | en | 75.5 | 75.5 | 71.55 | 71.43 | 72.22 | 66.75 | 67.84 |
| Exam | cmmlu | zh | 74.24 | 81.79 | 78.77 | 74.2 | 58.89 | 52.49 | 73.8 |
| Exam | ARC-c | en | 94.92 | 80 | 85.08 | 87.46 | 77.63 | 80.68 | 87.12 |
| Exam | ARC-e | en | 98.41 | 84.83 | 95.24 | 94.53 | 78.84 | 89.77 | 92.77 |
| Language | WiC | en | 51.57 | 52.82 | 50.78 | 50.63 | 50.47 | 50 | 49.84 |
| Language | WSC | en | 68.27 | 68.27 | 69.23 | 66.35 | 68.27 | 67.31 | 65.38 |
| Knowledge | BoolQ | en | 81.8 | 83.88 | 89.51 | 84.46 | 85.6 | 82.2 | 88.29 |
| Knowledge | commonsense_qa | en | 71.17 | 73.22 | 68.55 | 71.58 | 68.47 | 71.25 | 69.78 |
| Understanding | C3 | zh | 91.51 | 92 | 93.04 | 85.86 | 81.64 | 83.51 | **93.26** |
| Understanding | race-middle | en | 91.99 | 91.02 | 92.06 | 91.16 | 88.09 | 81.69 | 90.46 |
| Understanding | race-high | en | 90.71 | 87.91 | 90.08 | 88.34 | 82.08 | 78.73 | 86.74 |
| Understanding | lcsts | zh | 18.29 | 15.82 | 15.96 | 16.49 | 10.62 | 17.29 | **18.61** |
| Understanding | eprstmt-dev | zh | 91.88 | 86.88 | 91.25 | 91.88 | 48.12 | 83.12 | 90 |
| Understanding | lambada | en | 71.67 | 71.14 | 69.98 | 70.64 | 75.43 | 74.23 | 72.56 |
| Reasoning | hellaswag | en | 70.25 | 72.76 | 70.38 | 71.55 | 66.83 | 74.65 | 71.49 |
| Reasoning | siqa | en | 81.73 | 72.52 | 78.97 | 76.2 | 58.96 | 64.18 | 77.12 |
| Reasoning | bbh | en | 73.68 | 54.63 | 59.43 | 67.86 | 68.45 | 59.9 | 46.54 |
| Code | humaneval | en | 69.51 | 75 | 60.37 | 26.22 | 5.49 | 27.44 | 60.98 |
| Code | mbpp | en | 60 | 60 | 43.6 | 56.8 | 51.2 | 42.6 | 54 |
| Math | math | en | 26.86 | 38 | 27.14 | 27.06 | 28.52 | 15.32 | **38.34** |
| Math | gsm8k | en | 78.54 | 79.76 | 52.54 | 71.11 | 73.09 | 56.25 | 75.51 |
| Overall | avg_zh | | 70.35 | 71.58 | 71.35 | 68.39 | 51.13 | 57.62 | **71.74** |
| Overall | avg_all | | 73.11 | 71.78 | 69.60 | 68.88 | 61.60 | 62.32 | 70.61 |

<br>

# Quickstart
We provide simple examples illustrating the use of 360Zhinao2-7B-Base and 360Zhinao2-7B-Chat on 🤖 ModelScope and 🤗 Transformers.

## Dependency Installation
- python >= 3.8
- pytorch >= 2.0
- transformers >= 4.37.2
- CUDA >= 11.4

```shell
pip install -r requirements.txt
```

Optionally, we recommend installing Flash-Attention 2 to improve performance and reduce memory footprint.

>flash-attn >= 2.3.6
```shell
FLASH_ATTENTION_FORCE_BUILD=TRUE pip install flash-attn==2.3.6
```

## 🤗 Transformers
### Demonstration of Base Model Inference

```python
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers.generation import GenerationConfig

MODEL_NAME_OR_PATH = "qihoo360/360Zhinao2-7B-Base"

tokenizer = AutoTokenizer.from_pretrained(
    MODEL_NAME_OR_PATH,
    trust_remote_code=True)

model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME_OR_PATH,
    device_map="auto",
    trust_remote_code=True)

generation_config = GenerationConfig.from_pretrained(
    MODEL_NAME_OR_PATH,
    trust_remote_code=True)

inputs = tokenizer('中国二十四节气\n1. 立春\n2. 雨水\n3. 惊蛰\n4. 春分\n5. 清明\n', return_tensors='pt')
inputs = inputs.to(model.device)

pred = model.generate(input_ids=inputs["input_ids"], generation_config=generation_config)
print("outputs:\n", tokenizer.decode(pred.cpu()[0], skip_special_tokens=True))
```
### Demonstration of Chat Model Inference

```python
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers.generation import GenerationConfig

MODEL_NAME_OR_PATH = "qihoo360/360Zhinao2-7B-Chat-4K"

tokenizer = AutoTokenizer.from_pretrained(
    MODEL_NAME_OR_PATH,
    trust_remote_code=True)

model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME_OR_PATH,
    device_map="auto",
    trust_remote_code=True)

generation_config = GenerationConfig.from_pretrained(
    MODEL_NAME_OR_PATH,
    trust_remote_code=True)

messages = []
# round-1
messages.append({"role": "user", "content": "介绍一下刘德华"})
response = model.chat(tokenizer=tokenizer, messages=messages, generation_config=generation_config)
messages.append({"role": "assistant", "content": response})
print(messages)

# round-2
messages.append({"role": "user", "content": "他有什么代表作?"})
response = model.chat(tokenizer=tokenizer, messages=messages, generation_config=generation_config)
messages.append({"role": "assistant", "content": response})
print(messages)
```

## 🤖 ModelScope
### Demonstration of Base Model Inference

```python
from modelscope import AutoModelForCausalLM, AutoTokenizer
from modelscope import GenerationConfig

MODEL_NAME_OR_PATH = "qihoo360/360Zhinao2-7B-Base"

tokenizer = AutoTokenizer.from_pretrained(
    MODEL_NAME_OR_PATH,
    trust_remote_code=True)

model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME_OR_PATH,
    device_map="auto",
    trust_remote_code=True)

generation_config = GenerationConfig.from_pretrained(
    MODEL_NAME_OR_PATH,
    trust_remote_code=True)

inputs = tokenizer('中国二十四节气\n1. 立春\n2. 雨水\n3. 惊蛰\n4. 春分\n5. 清明\n', return_tensors='pt')
inputs = inputs.to(model.device)

pred = model.generate(input_ids=inputs["input_ids"], generation_config=generation_config)
print("outputs:\n", tokenizer.decode(pred.cpu()[0], skip_special_tokens=True))
```

### Demonstration of Chat Model Inference

```python
from modelscope import AutoModelForCausalLM, AutoTokenizer
from modelscope import GenerationConfig

MODEL_NAME_OR_PATH = "qihoo360/360Zhinao2-7B-Chat-4K"

tokenizer = AutoTokenizer.from_pretrained(
    MODEL_NAME_OR_PATH,
    trust_remote_code=True)

model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME_OR_PATH,
    device_map="auto",
    trust_remote_code=True)

generation_config = GenerationConfig.from_pretrained(
    MODEL_NAME_OR_PATH,
    trust_remote_code=True)

messages = []
# round-1
messages.append({"role": "user", "content": "介绍一下刘德华"})
response = model.chat(tokenizer=tokenizer, messages=messages, generation_config=generation_config)
messages.append({"role": "assistant", "content": response})
print(messages)

# round-2
messages.append({"role": "user", "content": "他有什么代表作?"})
response = model.chat(tokenizer=tokenizer, messages=messages, generation_config=generation_config)
messages.append({"role": "assistant", "content": response})
print(messages)
```

## CLI Demo
Use the terminal for a command-line interface:

```shell
python cli_demo.py
```
<p align="center">
    <img src="assets/cli_demo.gif" width="600" />
</p>

Note: for Mac users, `device = 'mps'` is not supported yet.

## Web Demo

```shell
streamlit run web_demo.py
```
<p align="center">
    <img src="assets/web_demo.gif" width="600" />
</p>

## API Demo
Launch the API server:
```shell
python openai_api.py
```

Then request with parameters:
```shell
curl 'http://localhost:8360/v1/chat/completions' \
-H 'Content-Type: application/json' \
-d '{
    "max_new_tokens": 200,
    "do_sample": true,
    "top_k": 0,
    "top_p": 0.8,
    "temperature": 1.0,
    "repetition_penalty": 1.0,
    "messages": [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "你好"}
    ]
}'
```

<br>

# Model Inference
## Quantization
We provide quantization schemes based on AutoGPTQ and release the Int4 quantized models.
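As a minimal sketch of using the released Int4 weights, the GPTQ checkpoints from the Download URL table can typically be loaded through the same 🤗 Transformers interface once `auto-gptq` and `optimum` are installed; the repo id below is the 4K chat Int4 model from that table, and the rest mirrors the Chat quickstart above.

```python
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers.generation import GenerationConfig

INT4_MODEL = "qihoo360/360Zhinao2-7B-Chat-4K-Int4"  # Int4 repo from the Download URL table

tokenizer = AutoTokenizer.from_pretrained(INT4_MODEL, trust_remote_code=True)
# The GPTQ quantization config stored in the repo is picked up automatically
# when auto-gptq and optimum are available.
model = AutoModelForCausalLM.from_pretrained(
    INT4_MODEL,
    device_map="auto",
    trust_remote_code=True)
generation_config = GenerationConfig.from_pretrained(INT4_MODEL, trust_remote_code=True)

messages = [{"role": "user", "content": "介绍一下刘德华"}]
response = model.chat(tokenizer=tokenizer, messages=messages, generation_config=generation_config)
print(response)
```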
## Deployment
### vLLM Installation
We recommend using `vLLM==0.3.3`.

If you are using **CUDA 12.1 and PyTorch 2.1**, you can install vLLM directly with:
```shell
pip install vllm==0.3.3
```

Otherwise, please refer to the official vLLM [Installation Instructions](https://docs.vllm.ai/en/latest/getting_started/installation.html).

After installation, perform the following steps:
1. Copy `vllm/zhinao.py` into `vllm/model_executor/models` in your vllm installation directory (in your python/conda env).
2. Copy `vllm/serving_chat.py` into `vllm/entrypoints/openai` in your vllm installation directory.
3. Then add a line in `vllm/model_executor/models/__init__.py`:

```shell
    "ZhinaoForCausalLM": ("zhinao", "ZhinaoForCausalLM"),
```

### vLLM Service Start

Start the service:
```shell
python -m vllm.entrypoints.openai.api_server \
    --served-model-name 360Zhinao2-7B-Chat-4K \
    --model qihoo360/360Zhinao2-7B-Chat-4K \
    --trust-remote-code \
    --tensor-parallel-size 1 \
    --max-model-len 4096 \
    --host 0.0.0.0 \
    --port 8360
```

Use curl to request the service:
```shell
curl http://localhost:8360/v1/chat/completions \
-H "Content-Type: application/json" \
-d '{
    "model": "360Zhinao2-7B-Chat-4K",
    "max_tokens": 200,
    "top_k": -1,
    "top_p": 0.8,
    "temperature": 1.0,
    "presence_penalty": 0.0,
    "frequency_penalty": 0.0,
    "messages": [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "你好"}
    ],
    "stop": [
        "<eod>",
        "<|im_end|>",
        "<|im_start|>"
    ]
}'
```
Use python to request the service:
```python
from openai import OpenAI

openai_api_key = "EMPTY"
openai_api_base = "http://localhost:8360/v1"

client = OpenAI(
    api_key=openai_api_key,
    base_url=openai_api_base,
)

chat_response = client.chat.completions.create(
    model="360Zhinao2-7B-Chat-4K",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "你好"},
    ],
    stop=[
        "<eod>",
        "<|im_end|>",
        "<|im_start|>"
    ],
    presence_penalty=0.0,
    frequency_penalty=0.0
)
print("Chat response:", chat_response)
```

> If you need to enable repetition penalty, we recommend setting `presence_penalty` and `frequency_penalty` instead of `repetition_penalty`.

<br>

# Model Finetune
## Training data

Training data: `data/training_data_sample.json`. This example data has 10,000 rows sampled from [multiturn_chat_0.8M](https://huggingface.co/datasets/BelleGroup/multiturn_chat_0.8M) with the format converted.

Data format:
```json
[
  {
    "id": 1,
    "conversations": [
        {
            "from": "system",
            "value": "You are a helpful assistant."
        },
        {
            "from": "user",
            "value": "您好啊"
        },
        {
            "from": "assistant",
            "value": "你好!我今天能为您做些什么?有什么问题或需要帮助吗? 我在这里为您提供服务。"
        }
    ]
  }
]
```
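Before launching a finetuning run, a data file can be sanity-checked against the schema shown above. This small helper assumes only what the example shows (a JSON list of records with `id` and `conversations`, each turn having `from` and `value`):

```python
import json

def check_training_file(path: str) -> int:
    """Validate the conversation format shown above and return the number of samples."""
    with open(path, encoding="utf-8") as f:
        samples = json.load(f)
    allowed_roles = {"system", "user", "assistant"}
    for sample in samples:
        assert "id" in sample and "conversations" in sample, "missing id/conversations"
        for turn in sample["conversations"]:
            assert turn["from"] in allowed_roles, f"unexpected role: {turn['from']}"
            assert isinstance(turn["value"], str)
    return len(samples)

print(check_training_file("./data/training_data_sample.json"))  # expected: 10000
```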
## Finetuning scripts
```shell
set -x

HOSTFILE=hostfile
DS_CONFIG=./finetune/ds_config_zero2.json

# PARAMS
LR=5e-6
EPOCHS=3
MAX_LEN=4096
BATCH_SIZE=4
NUM_NODES=1
NUM_GPUS=8
MASTER_PORT=29500

IS_CONCAT=False # Whether to concatenate samples up to the maximum length (MAX_LEN)

DATA_PATH="./data/training_data_sample.json"
MODEL_PATH="qihoo360/360Zhinao2-7B-Base"
OUTPUT_DIR="./outputs/"

deepspeed --hostfile ${HOSTFILE} \
        --master_port ${MASTER_PORT} \
        --num_nodes ${NUM_NODES} \
        --num_gpus ${NUM_GPUS} \
        finetune.py \
        --report_to "tensorboard" \
        --data_path ${DATA_PATH} \
        --model_name_or_path ${MODEL_PATH} \
        --output_dir ${OUTPUT_DIR} \
        --model_max_length ${MAX_LEN} \
        --num_train_epochs ${EPOCHS} \
        --per_device_train_batch_size ${BATCH_SIZE} \
        --gradient_accumulation_steps 1 \
        --save_strategy steps \
        --save_steps 200 \
        --learning_rate ${LR} \
        --lr_scheduler_type cosine \
        --adam_beta1 0.9 \
        --adam_beta2 0.95 \
        --adam_epsilon 1e-8 \
        --max_grad_norm 1.0 \
        --weight_decay 0.1 \
        --warmup_ratio 0.01 \
        --gradient_checkpointing True \
        --bf16 True \
        --tf32 True \
        --deepspeed ${DS_CONFIG} \
        --is_concat ${IS_CONCAT} \
        --logging_steps 1 \
        --log_on_each_node False
```
```shell
bash finetune/ds_finetune.sh
```
- Configuring `HOSTFILE` switches between single-machine and multi-machine training.
- Configuring `ds_config` switches between zero1, zero2 and zero3.
- `fp16, bf16` configure mixed-precision training; bf16 is recommended, to stay consistent with the pretrained model.
- `is_concat` configures whether the training data is concatenated (packed) up to the maximum length; a generic illustration of this packing idea follows below.
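The `is_concat` switch corresponds to the common sample-packing trick: instead of padding every conversation to `MAX_LEN`, consecutive tokenized samples are concatenated until the window is full, which wastes fewer tokens when the dataset is large. The project's actual implementation lives in `finetune.py` and is not shown in this card; the sketch below is only a generic illustration of the idea, not the project's exact code.

```python
from typing import List

def pack_samples(tokenized_samples: List[List[int]],
                 max_len: int = 4096, pad_id: int = 0) -> List[List[int]]:
    """Greedily concatenate tokenized samples into fixed-length training sequences."""
    packed, current = [], []
    for ids in tokenized_samples:
        if current and len(current) + len(ids) > max_len:
            packed.append(current + [pad_id] * (max_len - len(current)))
            current = []
        current.extend(ids[:max_len])  # truncate a single over-long sample
    if current:
        packed.append(current + [pad_id] * (max_len - len(current)))
    return packed

print(len(pack_samples([[1] * 3000, [2] * 2000, [3] * 1000], max_len=4096)))  # -> 2
```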
<br>

# License

The source code of this repository follows the open-source license Apache 2.0.

360Zhinao open-source models support free commercial use. It is not necessary for you to submit a request for commercial usage.
README_CN.md
ADDED
@@ -0,0 +1,564 @@
---
license: apache-2.0
language:
- zh
- en
library_name: transformers
tags:
- qihoo360
- 奇虎360
- zhinao
- 360Zhinao
- pretrain
---

<p align="left">
    中文 | &nbsp;<a href="./README.md">English</a>&nbsp;
</p>
<br>

<div align="center">
<h1>
  360智脑
</h1>
</div>
<div align="center">
🤗 <a href="https://huggingface.co/qihoo360">Hugging Face</a>&nbsp;&nbsp; | &nbsp;&nbsp;
🤖 <a href="https://www.modelscope.cn/profile/qihoo360">ModelScope</a>&nbsp;&nbsp; | &nbsp;&nbsp;
💬 <a href="./assets/WeChat.png">WeChat (微信)</a>
</div>
<br>
<p align="center">
 欢迎访问360智脑官网 <a href="https://ai.360.com">https://ai.360.com</a> 体验更多更强大的功能。
</p>

<br>

# 模型介绍
🎉🎉🎉 我们开源了360智脑大模型的系列工作,本次开源了以下模型:
- **360Zhinao2-7B-Base**
- **360Zhinao2-7B-Chat-4K**
- **360Zhinao2-7B-Chat-32K**
- **360Zhinao2-7B-Chat-360K**

360智脑大模型特点如下:
- **基础模型**:采用当前主流的两阶段训练方法,第一阶段采用cosine学习率总共训练10T token,第二阶段我们加大了高质量数据的占比,训练了100B高质量token,学习率直接decay到0。**360Zhinao2-7B总共训练数据量达10.1T token**。
- **对话模型**:具有强大的对话能力,开放4K、32K、360K三种不同文本长度。

<br>

# 更新信息
- [2024.11.18] 🔥🔥🔥 我们发布了360Zhinao2-7B,同时开放Base模型和4K、32K、360K三种文本长度的Chat模型。
- [2024.05.23] 我们发布了360Zhinao-search以及360Zhinao-1.8B-Reranking两个模型,分别在[C-MTEB 榜单](https://huggingface.co/spaces/mteb/leaderboard)的Retrieval和Reranking任务上排名第一。
- [2024.05.20] 我们将llama3的窗口长度扩展到360k并发布了**llama3-8B-360Zhinao-360k-Instruct**<a href="https://huggingface.co/qihoo360/llama3-8B-360Zhinao-360k-Instruct">🤗</a>
- [2024.04.12] 我们发布了360Zhinao-7B 1.0版本,同时开放Base模型和4K、32K、360K三种文本长度的Chat模型。
技术报告详见[arXiv](https://arxiv.org/abs/2405.13386)。

<br>

# 目录
- [下载地址](#下载地址)
- [模型评估](#模型评估)
- [快速开始](#快速开始)
- [模型推理](#模型推理)
- [模型微调](#模型微调)
- [许可证](#许可证)

<br>

# 下载地址
本次发布版本和下载链接见下表:

| Size | Model | BF16 | Int4 |
|:-:|-|:-:|:-:|
| 7B | 360Zhinao2-7B-Base | <a href="https://www.modelscope.cn/models/qihoo360/360Zhinao2-7B-Base/summary">🤖</a> <a href="https://huggingface.co/qihoo360/360Zhinao2-7B-Base">🤗</a> | |
| 7B | 360Zhinao2-7B-Chat-4K | <a href="https://www.modelscope.cn/models/qihoo360/360Zhinao2-7B-Chat-4K/summary">🤖</a> <a href="https://huggingface.co/qihoo360/360Zhinao2-7B-Chat-4K">🤗</a> | <a href="https://www.modelscope.cn/models/qihoo360/360Zhinao2-7B-Chat-4K-Int4/summary">🤖</a> <a href="https://huggingface.co/qihoo360/360Zhinao2-7B-Chat-4K-Int4">🤗</a> |
| 7B | 360Zhinao2-7B-Chat-32K | <a href="https://www.modelscope.cn/models/qihoo360/360Zhinao2-7B-Chat-32K/summary">🤖</a> <a href="https://huggingface.co/qihoo360/360Zhinao2-7B-Chat-32K">🤗</a> | <a href="https://www.modelscope.cn/models/qihoo360/360Zhinao2-7B-Chat-32K-Int4/summary">🤖</a> <a href="https://huggingface.co/qihoo360/360Zhinao2-7B-Chat-32K-Int4">🤗</a> |
| 7B | 360Zhinao2-7B-Chat-360K | <a href="https://www.modelscope.cn/models/qihoo360/360Zhinao2-7B-Chat-360K/summary">🤖</a> <a href="https://huggingface.co/qihoo360/360Zhinao2-7B-Chat-360K">🤗</a> | <a href="https://www.modelscope.cn/models/qihoo360/360Zhinao2-7B-Chat-360K-Int4/summary">🤖</a> <a href="https://huggingface.co/qihoo360/360Zhinao2-7B-Chat-360K-Int4">🤗</a> |

<br>

# 模型评估
## 基础模型
我们使用了开源工具OpenCompass对模型进行评估,对比了近半年国内外开源的10B以下模型,360Zhinao2-7B具备较强的竞争力。360Zhinao2-7B在CEval(中文考试)、C3(中文阅读理解)、LCSTS(中文短文本摘要)等中文benchmark上表现不俗,中文benchmark均分排名第一;在具有挑战性的竞赛数学数据集MATH上同样排名第一。**360Zhinao2-7B模型在中文处理能力、复杂数学推理能力两个方面具备优势。**

| Type | Datasets | Language | glm4-9b | Qwen2.5-7B | internlm2.5-7b | Yi1.5-9B | gemma2-9b | Llama3.1-8B | 360Zhinao2-7B |
|-|-|-|-|-|-|-|-|-|-|
| Exam | ceval | zh | 75.83 | 81.41 | 77.71 | 73.51 | 56.36 | 51.67 | **83.04** |
| Exam | mmlu | en | 75.5 | 75.5 | 71.55 | 71.43 | 72.22 | 66.75 | 67.84 |
| Exam | cmmlu | zh | 74.24 | 81.79 | 78.77 | 74.2 | 58.89 | 52.49 | 73.8 |
| Exam | ARC-c | en | 94.92 | 80 | 85.08 | 87.46 | 77.63 | 80.68 | 87.12 |
| Exam | ARC-e | en | 98.41 | 84.83 | 95.24 | 94.53 | 78.84 | 89.77 | 92.77 |
| Language | WiC | en | 51.57 | 52.82 | 50.78 | 50.63 | 50.47 | 50 | 49.84 |
| Language | WSC | en | 68.27 | 68.27 | 69.23 | 66.35 | 68.27 | 67.31 | 65.38 |
| Knowledge | BoolQ | en | 81.8 | 83.88 | 89.51 | 84.46 | 85.6 | 82.2 | 88.29 |
| Knowledge | commonsense_qa | en | 71.17 | 73.22 | 68.55 | 71.58 | 68.47 | 71.25 | 69.78 |
| Understanding | C3 | zh | 91.51 | 92 | 93.04 | 85.86 | 81.64 | 83.51 | **93.26** |
| Understanding | race-middle | en | 91.99 | 91.02 | 92.06 | 91.16 | 88.09 | 81.69 | 90.46 |
| Understanding | race-high | en | 90.71 | 87.91 | 90.08 | 88.34 | 82.08 | 78.73 | 86.74 |
| Understanding | lcsts | zh | 18.29 | 15.82 | 15.96 | 16.49 | 10.62 | 17.29 | **18.61** |
| Understanding | eprstmt-dev | zh | 91.88 | 86.88 | 91.25 | 91.88 | 48.12 | 83.12 | 90 |
| Understanding | lambada | en | 71.67 | 71.14 | 69.98 | 70.64 | 75.43 | 74.23 | 72.56 |
| Reasoning | hellaswag | en | 70.25 | 72.76 | 70.38 | 71.55 | 66.83 | 74.65 | 71.49 |
| Reasoning | siqa | en | 81.73 | 72.52 | 78.97 | 76.2 | 58.96 | 64.18 | 77.12 |
| Reasoning | bbh | en | 73.68 | 54.63 | 59.43 | 67.86 | 68.45 | 59.9 | 46.54 |
| Code | humaneval | en | 69.51 | 75 | 60.37 | 26.22 | 5.49 | 27.44 | 60.98 |
| Code | mbpp | en | 60 | 60 | 43.6 | 56.8 | 51.2 | 42.6 | 54 |
| Math | math | en | 26.86 | 38 | 27.14 | 27.06 | 28.52 | 15.32 | **38.34** |
| Math | gsm8k | en | 78.54 | 79.76 | 52.54 | 71.11 | 73.09 | 56.25 | 75.51 |
| Overall | avg_zh | | 70.35 | 71.58 | 71.35 | 68.39 | 51.13 | 57.62 | **71.74** |
| Overall | avg_all | | 73.11 | 71.78 | 69.60 | 68.88 | 61.60 | 62.32 | 70.61 |

<br>

# 快速开始
下面是利用🤖 ModelScope和🤗 Transformers快速使用360Zhinao2-7B-Base和360Zhinao2-7B-Chat的简单示例。

## 依赖安装
- python 3.8 及以上版本
- pytorch 2.0 及以上版本
- transformers 4.37.2 及以上版本
- 建议使用 CUDA 11.4 及以上版本

```shell
pip install -r requirements.txt
```
我们推荐安装flash-attention(当前已支持flash attention 2)来提高运行效率并降低显存占用。flash-attention只是可选项,不安装也可正常运行该项目。

>flash-attn >= 2.3.6
```shell
FLASH_ATTENTION_FORCE_BUILD=TRUE pip install flash-attn==2.3.6
```

## 🤗 Transformers
### Base模型推理

此代码演示使用transformers快速调用360Zhinao2-7B-Base模型进行推理:
```python
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers.generation import GenerationConfig

MODEL_NAME_OR_PATH = "qihoo360/360Zhinao2-7B-Base"

tokenizer = AutoTokenizer.from_pretrained(
    MODEL_NAME_OR_PATH,
    trust_remote_code=True)

model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME_OR_PATH,
    device_map="auto",
    trust_remote_code=True)

generation_config = GenerationConfig.from_pretrained(
    MODEL_NAME_OR_PATH,
    trust_remote_code=True)

inputs = tokenizer('中国二十四节气\n1. 立春\n2. 雨水\n3. 惊蛰\n4. 春分\n5. 清明\n', return_tensors='pt')
inputs = inputs.to(model.device)

pred = model.generate(input_ids=inputs["input_ids"], generation_config=generation_config)
print("outputs:\n", tokenizer.decode(pred.cpu()[0], skip_special_tokens=True))
```

### Chat模型推理

此代码演示使用transformers快速调用360Zhinao2-7B-Chat-4K模型进行推理:
```python
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers.generation import GenerationConfig

MODEL_NAME_OR_PATH = "qihoo360/360Zhinao2-7B-Chat-4K"

tokenizer = AutoTokenizer.from_pretrained(
    MODEL_NAME_OR_PATH,
    trust_remote_code=True)

model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME_OR_PATH,
    device_map="auto",
    trust_remote_code=True)

generation_config = GenerationConfig.from_pretrained(
    MODEL_NAME_OR_PATH,
    trust_remote_code=True)

messages = []
# round-1
messages.append({"role": "user", "content": "介绍一下刘德华"})
response = model.chat(tokenizer=tokenizer, messages=messages, generation_config=generation_config)
messages.append({"role": "assistant", "content": response})
print(messages)

# round-2
messages.append({"role": "user", "content": "他有什么代表作?"})
response = model.chat(tokenizer=tokenizer, messages=messages, generation_config=generation_config)
messages.append({"role": "assistant", "content": response})
print(messages)
```

## 🤖 ModelScope
### Base模型推理

此代码演示使用ModelScope快速调用360Zhinao2-7B-Base模型进行推理:

```python
from modelscope import AutoModelForCausalLM, AutoTokenizer
from modelscope import GenerationConfig

MODEL_NAME_OR_PATH = "qihoo360/360Zhinao2-7B-Base"

tokenizer = AutoTokenizer.from_pretrained(
    MODEL_NAME_OR_PATH,
    trust_remote_code=True)

model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME_OR_PATH,
    device_map="auto",
    trust_remote_code=True)

generation_config = GenerationConfig.from_pretrained(
    MODEL_NAME_OR_PATH,
    trust_remote_code=True)

inputs = tokenizer('中国二十四节气\n1. 立春\n2. 雨水\n3. 惊蛰\n4. 春分\n5. 清明\n', return_tensors='pt')
inputs = inputs.to(model.device)

pred = model.generate(input_ids=inputs["input_ids"], generation_config=generation_config)
print("outputs:\n", tokenizer.decode(pred.cpu()[0], skip_special_tokens=True))
```

### Chat模型推理

此代码演示使用ModelScope快速调用360Zhinao2-7B-Chat-4K模型进行推理:
```python
from modelscope import AutoModelForCausalLM, AutoTokenizer
from modelscope import GenerationConfig

MODEL_NAME_OR_PATH = "qihoo360/360Zhinao2-7B-Chat-4K"

tokenizer = AutoTokenizer.from_pretrained(
    MODEL_NAME_OR_PATH,
    trust_remote_code=True)

model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME_OR_PATH,
    device_map="auto",
    trust_remote_code=True)

generation_config = GenerationConfig.from_pretrained(
    MODEL_NAME_OR_PATH,
    trust_remote_code=True)

messages = []
# round-1
messages.append({"role": "user", "content": "介绍一下刘德华"})
response = model.chat(tokenizer=tokenizer, messages=messages, generation_config=generation_config)
messages.append({"role": "assistant", "content": response})
print(messages)

# round-2
messages.append({"role": "user", "content": "他有什么代表作?"})
response = model.chat(tokenizer=tokenizer, messages=messages, generation_config=generation_config)
messages.append({"role": "assistant", "content": response})
print(messages)
```

## 终端 Demo
可使用终端交互实现快速体验:
```shell
python cli_demo.py
```
<p align="center">
    <img src="assets/cli_demo.gif" width="600" />
</p>

注:我们尚未支持Mac上的`device = 'mps'`。

## 网页 Demo
也可使用网页交互实现快速体验:
```shell
streamlit run web_demo.py
```
<p align="center">
    <img src="assets/web_demo.gif" width="600" />
</p>

## API Demo
启动命令:
```shell
python openai_api.py
```

请求参数:
```shell
curl 'http://localhost:8360/v1/chat/completions' \
-H 'Content-Type: application/json' \
-d '{
    "max_new_tokens": 200,
    "do_sample": true,
    "top_k": 0,
    "top_p": 0.8,
    "temperature": 1.0,
    "repetition_penalty": 1.0,
    "messages": [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "你好"}
    ]
}'
```

<br>

# 模型推理
## 模型量化
我们提供了基于AutoGPTQ的量化方案,并开源了Int4量化模型。

## 模型部署
### vLLM安装环境
如希望部署及加速推理,我们建议使用 `vLLM==0.3.3`。

如果你使用**CUDA 12.1和PyTorch 2.1**,可以直接使用以下命令安装vLLM:
```shell
pip install vllm==0.3.3
```

否则请参考vLLM官方的[安装说明](https://docs.vllm.ai/en/latest/getting_started/installation.html)。

安装完成后,还需要以下操作:
1. 把`vllm/zhinao.py`文件复制到env环境对应的`vllm/model_executor/models`目录下。
2. 把`vllm/serving_chat.py`文件复制到env环境对应的`vllm/entrypoints/openai`目录下。
3. 然后在`vllm/model_executor/models/__init__.py`文件中增加一行代码:

```shell
    "ZhinaoForCausalLM": ("zhinao", "ZhinaoForCausalLM"),
```

### vLLM服务启动

启动服务:
```shell
python -m vllm.entrypoints.openai.api_server \
    --served-model-name 360Zhinao2-7B-Chat-4K \
    --model qihoo360/360Zhinao2-7B-Chat-4K \
    --trust-remote-code \
    --tensor-parallel-size 1 \
    --max-model-len 4096 \
    --host 0.0.0.0 \
    --port 8360
```

使用curl请求服务:
```shell
curl http://localhost:8360/v1/chat/completions \
-H "Content-Type: application/json" \
-d '{
    "model": "360Zhinao2-7B-Chat-4K",
    "max_tokens": 200,
    "top_k": -1,
    "top_p": 0.8,
    "temperature": 1.0,
    "presence_penalty": 0.0,
    "frequency_penalty": 0.0,
    "messages": [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "你好"}
    ],
    "stop": [
        "<eod>",
        "<|im_end|>",
        "<|im_start|>"
    ]
}'
```
使用python请求服务:
```python
from openai import OpenAI
# Set OpenAI's API key and API base to use vLLM's API server.
openai_api_key = "EMPTY"
openai_api_base = "http://localhost:8360/v1"

client = OpenAI(
    api_key=openai_api_key,
    base_url=openai_api_base,
)

chat_response = client.chat.completions.create(
    model="360Zhinao2-7B-Chat-4K",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "你好"},
    ],
    stop=[
        "<eod>",
        "<|im_end|>",
        "<|im_start|>"
    ],
    presence_penalty=0.0,
    frequency_penalty=0.0
)
print("Chat response:", chat_response)
```

> 注意:如需开启重复惩罚,建议使用 *presence_penalty* 和 *frequency_penalty* 参数。

<br>

# 模型微调
## 训练数据

我们提供了微调训练样例数据 `data/training_data_sample.json`,该样例数据是从 [multiturn_chat_0.8M](https://huggingface.co/datasets/BelleGroup/multiturn_chat_0.8M) 采样出 1 万条,并做了格式转换。

数据格式:
```json
[
  {
    "id": 1,
    "conversations": [
        {
            "from": "system",
            "value": "You are a helpful assistant."
        },
        {
            "from": "user",
            "value": "您好啊"
        },
        {
            "from": "assistant",
            "value": "你好!我今天能为您做些什么?有什么问题或需要帮助吗? 我在这里为您提供服务。"
        }
    ]
  }
]
```

## 微调训练
训练脚本如下:
```shell
set -x

HOSTFILE=hostfile
DS_CONFIG=./finetune/ds_config_zero2.json

# PARAMS
LR=5e-6
EPOCHS=3
MAX_LEN=4096
BATCH_SIZE=4
NUM_NODES=1
NUM_GPUS=8
MASTER_PORT=29500

IS_CONCAT=False # 是否将数据拼接到最大长度(MAX_LEN)

DATA_PATH="./data/training_data_sample.json"
MODEL_PATH="qihoo360/360Zhinao2-7B-Base"
OUTPUT_DIR="./outputs/"

deepspeed --hostfile ${HOSTFILE} \
        --master_port ${MASTER_PORT} \
        --num_nodes ${NUM_NODES} \
        --num_gpus ${NUM_GPUS} \
        finetune.py \
        --report_to "tensorboard" \
        --data_path ${DATA_PATH} \
        --model_name_or_path ${MODEL_PATH} \
        --output_dir ${OUTPUT_DIR} \
        --model_max_length ${MAX_LEN} \
        --num_train_epochs ${EPOCHS} \
        --per_device_train_batch_size ${BATCH_SIZE} \
        --gradient_accumulation_steps 1 \
        --save_strategy steps \
        --save_steps 200 \
        --learning_rate ${LR} \
        --lr_scheduler_type cosine \
        --adam_beta1 0.9 \
        --adam_beta2 0.95 \
        --adam_epsilon 1e-8 \
        --max_grad_norm 1.0 \
        --weight_decay 0.1 \
        --warmup_ratio 0.01 \
        --gradient_checkpointing True \
        --bf16 True \
        --tf32 True \
        --deepspeed ${DS_CONFIG} \
        --is_concat ${IS_CONCAT} \
        --logging_steps 1 \
        --log_on_each_node False
```
```shell
bash finetune/ds_finetune.sh
```
- 可通过配置hostfile,实现单机、多机训练。
- 可通过配置ds_config,实现zero2、zero3。
- 可通过配置fp16、bf16实现混合精度训练,建议使用bf16,与预训练模型保持一致。
- 可通过配置is_concat参数,控制训练数据是否拼接,当训练数据量级较大时,可通过拼接提升训练效率。

<br>

# 许可证

本仓库源码遵循开源许可证Apache 2.0。

360智脑开源模型支持免费商用,无需向我们进行特殊申请。
config.json
ADDED
@@ -0,0 +1,39 @@
{
  "architectures": [
    "ZhinaoForCausalLM"
  ],
  "attention_dropout": 0.1,
  "attn_dropout_prob": 0.1,
  "auto_map": {
    "AutoConfig": "configuration_zhinao.ZhinaoConfig",
    "AutoModelForCausalLM": "modeling_zhinao.ZhinaoForCausalLM"
  },
  "bf16": true,
  "emb_dropout_prob": 0.1,
  "flah-attn_version": "2.5.5",
  "fp16": false,
  "hidden_act": "silu",
  "hidden_size": 4096,
  "initializer_range": 0.01,
  "intermediate_size": 13056,
  "log_logit": false,
  "max_position_embeddings": 36000,
  "model_max_length": 36000,
  "model_type": "zhinao",
  "num_attention_heads": 32,
  "num_hidden_layers": 32,
  "num_key_value_heads": 8,
  "rms_norm_eps": 1e-05,
  "rope_scaling": null,
  "rope_theta": 1000000.0,
  "switch": 0,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.39.3",
  "use_cache": false,
  "use_flash_attn": true,
  "use_focal": false,
  "use_loss_weight": false,
  "use_pack_loss": false,
  "vocab_size": 158464
}
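For reference, the fields above can be read programmatically; with `trust_remote_code=True`, `AutoConfig` resolves the `auto_map` entry to the `ZhinaoConfig` class defined in `configuration_zhinao.py` below. A minimal sketch (the repo id is taken from the Download URL table and is only an example; the printed values follow the config.json above):

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("qihoo360/360Zhinao2-7B-Base", trust_remote_code=True)
# Grouped-query attention: 32 query heads share 8 key/value heads.
print(config.num_attention_heads, config.num_key_value_heads)  # 32 8
print(config.max_position_embeddings, config.rope_theta)       # 36000 1000000.0
```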
configuration_zhinao.py
ADDED
@@ -0,0 +1,92 @@
# Copyright (c) 360zhinao and the HuggingFace Inc. team. All rights reserved.
# This code is built upon Huggingface's transformers repository.


from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging


logger = logging.get_logger(__name__)


class ZhinaoConfig(PretrainedConfig):

    model_type = "zhinao"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=None,
        bos_token_id=None,
        eos_token_id=None,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        rope_scaling=None,
        bf16=False,
        fp16=False,
        use_flash_attn="auto",
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        self.bf16 = bf16
        self.fp16 = fp16
        self.use_flash_attn = use_flash_attn

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """
        Validate the `rope_scaling` configuration.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic", "ntk"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic', 'ntk'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
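A short usage sketch of the validation above, constructing the config directly rather than loading it from the hub (this assumes `configuration_zhinao.py` is importable from the working directory; the factor value is arbitrary):

```python
from configuration_zhinao import ZhinaoConfig

# Accepted: a dict with exactly the `type` and `factor` fields, factor being a float > 1.
cfg = ZhinaoConfig(rope_scaling={"type": "dynamic", "factor": 2.0})
print(cfg.rope_scaling)

# Rejected by _rope_scaling_validation: unknown scaling type.
try:
    ZhinaoConfig(rope_scaling={"type": "yarn", "factor": 2.0})
except ValueError as err:
    print(err)
```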
generation_config.json
ADDED
@@ -0,0 +1,14 @@
{
  "_from_model_config": true,
  "do_sample": true,
  "eos_token_id": [
    158326,
    158332,
    158333
  ],
  "max_new_tokens": 1024,
  "pad_token_id": 158326,
  "top_k": 0,
  "top_p": 0.8,
  "transformers_version": "4.39.3"
}
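These are the defaults that `GenerationConfig.from_pretrained(...)` picks up in the Quickstart examples; individual fields can be overridden per call without editing the file. A brief sketch (the repo id is assumed to be this model's hub id, and the override values are arbitrary):

```python
from transformers.generation import GenerationConfig

generation_config = GenerationConfig.from_pretrained(
    "qihoo360/360Zhinao2-7B-Base", trust_remote_code=True)
print(generation_config.top_p, generation_config.max_new_tokens)  # 0.8 and 1024, per the file above

# Override sampling settings for a single request.
generation_config.max_new_tokens = 256
generation_config.temperature = 0.7
```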
generation_utils.py
ADDED
@@ -0,0 +1,187 @@
import re
import torch
import numpy as np
from queue import Queue
from typing import Tuple, List, Union, Iterable
from transformers.utils import logging, add_start_docstrings
from transformers.generation.logits_process import LogitsProcessor, LOGITS_PROCESSOR_INPUTS_DOCSTRING, LogitsProcessorList


def make_context(model, tokenizer,
                 messages: List[dict],
                 system: str = "You are a helpful assistant.",
                 max_new_tokens: int = 0,
                 ):

    max_new_tokens = max_new_tokens or model.generation_config.max_new_tokens
    max_input_length = model.config.model_max_length - max_new_tokens

    im_start_id = [tokenizer.im_start_id]
    im_end_id = [tokenizer.im_end_id]
    nl_tokens = tokenizer.encode("\n")

    def _tokenize_str(role, content):
        return tokenizer.encode(role, allowed_special=set()) + nl_tokens + tokenizer.encode(content, allowed_special=set())

    def _parse_messages(messages):
        system, query, history = "", "", []
        ## system
        if messages[0]["role"] == "system":
            system = messages[0]["content"]
            messages = messages[1:]
        ## query
        assert messages[-1]["role"] == "user"
        query = messages[-1]["content"]
        messages = messages[:-1]
        ## history
        assert len(messages) % 2 == 0
        for i in range(0, len(messages), 2):
            assert messages[i]["role"] == "user" and messages[i+1]["role"] == "assistant"
            history.append([messages[i]["content"], messages[i+1]["content"]])

        return system, query, history

    _system, query, history = _parse_messages(messages)

    ## system
    system_text = _system if _system != "" else system
    system_tokens = []
    if system_text:
        system_tokens = im_start_id + _tokenize_str("system", system_text) + im_end_id + nl_tokens

    ## query
    query_tokens = im_start_id + _tokenize_str("user", query) + im_end_id + nl_tokens
    ## final assistant
    final_tokens = im_start_id + tokenizer.encode("assistant", allowed_special=set()) + nl_tokens

    ## max_history_tokens
    max_history_length = max_input_length - len(system_tokens) - len(query_tokens) - len(final_tokens)

    ## history
    context_tokens = []
    for turn_query, turn_response in reversed(history):
        ## query tokens
        history_query_tokens = im_start_id + _tokenize_str("user", turn_query) + im_end_id + nl_tokens
        ## answer tokens
        history_response_tokens = im_start_id + _tokenize_str("assistant", turn_response) + im_end_id + nl_tokens
        ## this round tokens
        next_context_tokens = history_query_tokens + history_response_tokens
        ## concat
        current_context_size = len(next_context_tokens) + len(context_tokens)
        if current_context_size < max_history_length:
            context_tokens = next_context_tokens + context_tokens
        else:
            break
    input_tokens = system_tokens + context_tokens + query_tokens + final_tokens

    return torch.LongTensor([input_tokens]).to(model.device)


class TextIterStreamer:
    def __init__(self, tokenizer, skip_prompt=False, skip_special_tokens=False):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.skip_special_tokens = skip_special_tokens
        self.tokens = []
        self.text_queue = Queue()
        self.next_tokens_are_prompt = True

    def put(self, value):
        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
        else:
            if len(value.shape) > 1:
                value = value[0]
            self.tokens.extend(value.tolist())
            tokens_str = self.tokenizer.decode(self.tokens, skip_special_tokens=self.skip_special_tokens, errors='ignore')
            self.text_queue.put(tokens_str)

    def end(self):
        self.text_queue.put(None)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get()
        if value is None:
            raise StopIteration()
        else:
            return value


class OutputRepetitionPenaltyLogitsProcessor(LogitsProcessor):
    r"""
    [`OutputRepetitionPenaltyLogitsProcessor`] prevents the repetition of previous tokens through a penalty. This penalty
    is applied at most once per token. Note that, for decoder-only models like most LLMs, the considered tokens include
    the prompt.

    In the original [paper](https://arxiv.org/pdf/1909.05858.pdf), the authors suggest the use of a penalty of around
    1.2 to achieve a good balance between truthful generation and lack of repetition. To penalize and reduce
    repetition, use `penalty` values above 1.0, where a higher value penalizes more strongly. To reward and encourage
    repetition, use `penalty` values between 0.0 and 1.0, where a lower value rewards more strongly.

    Args:
        penalty (`float`):
            The parameter for repetition penalty. 1.0 means no penalty. Above 1.0 penalizes previously generated
            tokens. Between 0.0 and 1.0 rewards previously generated tokens.
    """

    def __init__(self, input_length: int,
                 presence_penalties: float = 1.0,
                 frequency_penalties: float = 0,
                 repetition_penalties: float = 0):
        if not (repetition_penalties > 0):
            raise ValueError(f"`repetition_penalties` has to be a strictly positive float, but is {repetition_penalties}")
        if not ((frequency_penalties >= -2) and (frequency_penalties <= 2)):
            raise ValueError(f"`frequency_penalties` has to be in [-2, 2], but is {frequency_penalties}")
        if not ((presence_penalties >= -2) and (presence_penalties <= 2)):
            raise ValueError(f"`presence_penalties` has to be in [-2, 2], but is {presence_penalties}")

        self.repetition_penalties = repetition_penalties
        self.frequency_penalties = frequency_penalties
        self.presence_penalties = presence_penalties
        self.input_length = input_length

    def _get_bin_counts_and_mask(
        self,
        tokens: torch.Tensor,
        vocab_size: int,
        num_seqs: int,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # Compute the bin counts for the tokens.
        # vocab_size + 1 for padding.
        bin_counts = torch.zeros((num_seqs, vocab_size + 1),
                                 dtype=torch.long,
                                 device=tokens.device)
        bin_counts.scatter_add_(1, tokens, torch.ones_like(tokens))
        bin_counts = bin_counts[:, :vocab_size]
        mask = bin_counts > 0

        return bin_counts, mask

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, logits: torch.FloatTensor) -> torch.FloatTensor:
        prompt_tokens_tensor = input_ids[:, :self.input_length+1]
        output_tokens_tensor = input_ids[:, self.input_length+1:]

        num_seqs, vocab_size = logits.shape
        _, prompt_mask = self._get_bin_counts_and_mask(
            prompt_tokens_tensor, vocab_size, num_seqs)
        output_bin_counts, output_mask = self._get_bin_counts_and_mask(
            output_tokens_tensor, vocab_size, num_seqs)

        repetition_penalties = torch.Tensor([self.repetition_penalties]).to(logits.device)
        frequency_penalties = torch.Tensor([self.frequency_penalties]).to(logits.device)
        presence_penalties = torch.Tensor([self.presence_penalties]).to(logits.device)

        repetition_penalties = repetition_penalties[:, None].repeat(1, vocab_size)
        repetition_penalties[~(prompt_mask | output_mask)] = 1.0
        logits = torch.where(logits > 0, logits / repetition_penalties,
                             logits * repetition_penalties)

        # We follow the definition in OpenAI API.
        # Refer to https://platform.openai.com/docs/api-reference/parameter-details
        logits -= frequency_penalties.unsqueeze_(dim=1) * output_bin_counts
        logits -= presence_penalties.unsqueeze_(dim=1) * output_mask

        return logits
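A hedged usage sketch of the two helpers above, assuming the repository files have been downloaded locally so the module imports directly, and that the checkpoint loads with trust_remote_code=True (the repo id is an assumed placeholder):

from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer
from generation_utils import make_context, TextIterStreamer

tokenizer = AutoTokenizer.from_pretrained("qihoo360/360Zhinao2-7B-Chat-4K", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("qihoo360/360Zhinao2-7B-Chat-4K", device_map="auto", trust_remote_code=True)

messages = [{"role": "user", "content": "Hello!"}]
input_ids = make_context(model, tokenizer, messages)  # ChatML-style prompt, truncated to fit model_max_length
streamer = TextIterStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
Thread(target=model.generate, kwargs=dict(inputs=input_ids, streamer=streamer)).start()
for text in streamer:  # each item is the decoded text generated so far
    print(text)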
model-00001-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6858fdf66fe2f03f7734a6cfb866a6ac76f95b97f6d56708c70a6a57e3969725
size 4991500768
model-00002-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f0405da33b8dc8cf2896365ef57ca768c467f38499b6b915954f379e124a1085
size 4997868656
model-00003-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9506df7899d4c8e637b4acff656291235a5e25d064cd64df908297c1c4524671
size 4261734000
model-00004-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:203204ab533c3843a2632f104559eae471574d661d33a16c7a4968b955ba7b90
size 1298137216
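The four entries above are git-lfs pointers, so only the hash and byte size live in the repo. A minimal sketch, using only the standard library, for checking that a downloaded shard matches its recorded sha256:

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file in 1 MiB chunks to avoid loading ~5 GB shards into memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

assert sha256_of("model-00004-of-00004.safetensors") == "203204ab533c3843a2632f104559eae471574d661d33a16c7a4968b955ba7b90"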
model.safetensors.index.json
ADDED
@@ -0,0 +1,266 @@
{
  "metadata": {
    "total_size": 15549210624
  },
  "weight_map": {
    "lm_head.weight": "model-00004-of-00004.safetensors",
    "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.qkv_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.qkv_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.self_attn.qkv_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.1.self_attn.qkv_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.self_attn.qkv_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.10.self_attn.qkv_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.self_attn.qkv_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.11.self_attn.qkv_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.self_attn.qkv_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.12.self_attn.qkv_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.self_attn.qkv_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.13.self_attn.qkv_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.self_attn.qkv_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.14.self_attn.qkv_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.self_attn.qkv_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.15.self_attn.qkv_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.16.self_attn.qkv_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.16.self_attn.qkv_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.17.self_attn.qkv_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.17.self_attn.qkv_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.18.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.18.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.18.self_attn.qkv_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.18.self_attn.qkv_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.19.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.19.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.19.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.19.self_attn.qkv_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.19.self_attn.qkv_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.2.self_attn.qkv_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.2.self_attn.qkv_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.20.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.20.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.20.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.20.self_attn.qkv_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.20.self_attn.qkv_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.21.self_attn.qkv_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.21.self_attn.qkv_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.22.self_attn.qkv_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.22.self_attn.qkv_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.23.self_attn.qkv_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.23.self_attn.qkv_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.24.self_attn.qkv_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.24.self_attn.qkv_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.25.self_attn.qkv_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.25.self_attn.qkv_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.26.self_attn.qkv_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.26.self_attn.qkv_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.27.self_attn.qkv_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.27.self_attn.qkv_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.28.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.28.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.28.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.28.self_attn.qkv_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.28.self_attn.qkv_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.29.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.29.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.29.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.29.self_attn.qkv_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.29.self_attn.qkv_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.3.self_attn.qkv_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.3.self_attn.qkv_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.30.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.30.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.30.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.30.self_attn.qkv_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.30.self_attn.qkv_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.31.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.31.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.31.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.31.self_attn.qkv_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.31.self_attn.qkv_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.4.self_attn.qkv_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.4.self_attn.qkv_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.5.self_attn.qkv_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.5.self_attn.qkv_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.6.self_attn.qkv_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.6.self_attn.qkv_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.7.self_attn.qkv_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.7.self_attn.qkv_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.8.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.8.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.8.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.8.self_attn.qkv_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.8.self_attn.qkv_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.9.self_attn.qkv_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.9.self_attn.qkv_proj.weight": "model-00001-of-00004.safetensors",
    "model.norm.weight": "model-00003-of-00004.safetensors"
  }
}
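The `weight_map` is how transformers resolves each parameter to its shard at load time. A minimal sketch of doing the same lookup by hand with the safetensors package (paths assume the files sit in the current directory):

import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.layers.21.mlp.gate_proj.weight"   # one of the tensors that straddles a shard boundary
shard = index["weight_map"][name]               # "model-00002-of-00004.safetensors"
with safe_open(shard, framework="pt") as f:
    tensor = f.get_tensor(name)                 # loads just this tensor, not the whole shard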
modeling_zhinao.py
ADDED
@@ -0,0 +1,1094 @@
# Copyright (c) 360zhinao and the HuggingFace Inc. team. All rights reserved.
# This code is built upon Huggingface's transformers repository.

import math
import warnings
from threading import Thread
from typing import List, Optional, Tuple, Union

import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from transformers.activations import ACT2FN
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging
from transformers.generation.utils import GenerationConfig
from transformers.generation.logits_process import LogitsProcessorList
from .configuration_zhinao import ZhinaoConfig
from .generation_utils import TextIterStreamer, make_context, OutputRepetitionPenaltyLogitsProcessor


try:
    from flash_attn import flash_attn_varlen_func
    from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input
except ImportError:
    flash_attn_varlen_func = None
    index_first_axis, pad_input, unpad_input = None, None, None


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "ZhinaoConfig"


def _get_unpad_data(attention_mask):
    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    max_seqlen_in_batch = seqlens_in_batch.max().item()
    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
    return (
        indices,
        cu_seqlens,
        max_seqlen_in_batch,
    )


def calc_logits_metric(logits, log_topk=True):
    """Output logit metrics."""
    result = {
        "_max": round(torch.max(logits).item(), 7),
        "_var": round(torch.var(logits).item(), 7),
    }
    result["_mean"] = round(torch.mean(logits).item(), 3)
    result["_min"] = round(torch.min(logits).item(), 3)
    result["_max-mean"] = round(result["_max"] - result["_mean"], 3)

    if log_topk:
        topk = 10
        topk_avg_logits = logits.topk(topk, dim=-1).values.view(-1, topk)
        topk_avg_logits = torch.mean(topk_avg_logits, dim=0).tolist()
        result["_topk"] = topk_avg_logits[:topk]

        # probs
        log_probs = F.softmax(logits, dim=-1)

        topk = 3
        topk_avg_probs = log_probs.topk(topk, dim=-1).values.view(-1, topk)
        topk_avg_probs = torch.mean(topk_avg_probs, dim=0).tolist()
        log_probs = None

        for i in range(topk):
            result[f"_prob_topk{i+1}"] = round(topk_avg_probs[i], 3)
    return result
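# Worked example (illustrative, not part of the original file): for
# attention_mask = [[1, 1, 0], [1, 1, 1]], _get_unpad_data returns
#   indices             = [0, 1, 3, 4, 5]  # flattened positions of real tokens
#   cu_seqlens          = [0, 2, 5]        # cumulative sequence lengths (int32)
#   max_seqlen_in_batch = 3
# which is exactly the metadata flash_attn_varlen_func expects.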
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(
    input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
):
    """
    Make causal mask used for bi-directional self-attention.
    """
    bsz, tgt_len = input_ids_shape
    mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
    mask_cond = torch.arange(mask.size(-1), device=device)
    mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
    mask = mask.to(dtype)

    if past_key_values_length > 0:
        mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
    return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)


# Copied from transformers.models.bart.modeling_bart._expand_mask
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
    """
    Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
    """
    bsz, src_len = mask.size()
    tgt_len = tgt_len if tgt_len is not None else src_len

    expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)

    inverted_mask = 1.0 - expanded_mask

    return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)


class ZhinaoRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        ZhinaoRMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)
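# Note (illustrative): RMSNorm computes y = w * x / sqrt(mean(x**2, dim=-1) + eps);
# unlike LayerNorm it subtracts no mean and has no bias, and the normalization is
# done in float32 before casting back to the input dtype.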
class ZhinaoRotaryEmbedding(torch.nn.Module):
    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
        super().__init__()

        self.dim = dim
        self.max_position_embeddings = max_position_embeddings
        self.base = base
        inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
        self.register_buffer("inv_freq", inv_freq, persistent=False)

        # Build here to make `torch.jit.trace` work.
        self._set_cos_sin_cache(
            seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
        )

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len
        t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)

        freqs = torch.einsum("i,j->ij", t, self.inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False)
        self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False)

    def forward(self, x, seq_len=None):
        # x: [bs, num_attention_heads, seq_len, head_size]
        if seq_len > self.max_seq_len_cached:
            self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)

        return (
            self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
            self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
        )


class ZhinaoLinearScalingRotaryEmbedding(ZhinaoRotaryEmbedding):
    """ZhinaoRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""

    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
        self.scaling_factor = scaling_factor
        super().__init__(dim, max_position_embeddings, base, device)

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len
        t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
        t = t / self.scaling_factor

        freqs = torch.einsum("i,j->ij", t, self.inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False)
        self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False)


class ZhinaoDynamicNTKScalingRotaryEmbedding(ZhinaoRotaryEmbedding):
    """ZhinaoRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla"""

    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
        self.scaling_factor = scaling_factor
        super().__init__(dim, max_position_embeddings, base, device)

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len

        if seq_len > self.max_position_embeddings:
            base = self.base * (
                (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
            ) ** (self.dim / (self.dim - 2))
            inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
            self.register_buffer("inv_freq", inv_freq, persistent=False)

        t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)

        freqs = torch.einsum("i,j->ij", t, self.inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False)
        self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False)


class ZhinaoNTKScalingRotaryEmbedding(torch.nn.Module):
    def __init__(self, dim, max_position_embeddings=2048, base=10000, scaling_factor=100, device=None):
        super().__init__()

        self.dim = dim
        self.max_position_embeddings = max_position_embeddings
        self.base = base * scaling_factor
        inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
        self.register_buffer("inv_freq", inv_freq, persistent=False)

        # Build here to make `torch.jit.trace` work.
        self._set_cos_sin_cache(
            seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
        )

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len
        t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
        freqs = torch.einsum("i,j->ij", t, self.inv_freq)
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False)
        self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False)

    def forward(self, x, seq_len=None):
        if seq_len > self.max_seq_len_cached:
            self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)

        return (
            self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
            self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
        )
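# Plugging in numbers (illustrative): with dim=128, scaling_factor=1.0,
# max_position_embeddings=4096 and seq_len=8192, the dynamic-NTK branch rescales
# base by (8192 / 4096) ** (128 / 126) ~= 2.02, stretching the low rotary
# frequencies instead of truncating positions.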
def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
    # The first two dimensions of cos and sin are always 1, so we can `squeeze` them.
    cos = cos.squeeze(1).squeeze(0)  # [seq_len, dim]
    sin = sin.squeeze(1).squeeze(0)  # [seq_len, dim]
    cos = cos[position_ids].unsqueeze(1)  # [bs, 1, seq_len, dim]
    sin = sin[position_ids].unsqueeze(1)  # [bs, 1, seq_len, dim]
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed
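# Worked example (illustrative): with head_dim=4, rotate_half([x1, x2, x3, x4])
# = [-x3, -x4, x1, x2], so q*cos + rotate_half(q)*sin yields
# [x1*cos - x3*sin, x2*cos - x4*sin, x3*cos + x1*sin, x4*cos + x2*sin],
# i.e. dimension i is rotated against dimension i + dim/2 (GPT-NeoX-style RoPE).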
class ZhinaoMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        intermediate = self.act_fn(self.gate_proj(x)) * self.up_proj(x)
        down_proj = self.down_proj(intermediate)
        return down_proj
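# Note (illustrative): this is the gated (SwiGLU-style) MLP,
# down_proj(act_fn(gate_proj(x)) * up_proj(x)), with act_fn chosen by
# config.hidden_act (typically silu for models of this family).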
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
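# Worked example (illustrative): with 32 query heads and 4 KV heads, n_rep = 8,
# so a KV tensor of shape (b, 4, s, h) expands to (b, 32, s, h); each KV head is
# shared by 8 query heads, which is the grouped-query attention layout.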
class ZhinaoAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: ZhinaoConfig):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.num_key_value_heads = config.num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.max_position_embeddings = config.max_position_embeddings
        self.rope_theta = config.rope_theta
        self.is_causal = True
        self.dropout = 0.0
        self.use_flash_attn = config.use_flash_attn

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
                f" and `num_heads`: {self.num_heads})."
            )

        self.qkv_hidden_size = (self.num_heads + 2 * self.num_key_value_heads) * self.head_dim
        self.qkv_proj = nn.Linear(self.hidden_size, self.qkv_hidden_size, bias=True)
        self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
        self._init_rope()

    def _init_rope(self):
        if self.config.rope_scaling is None:
            self.rotary_emb = ZhinaoRotaryEmbedding(
                self.head_dim,
                max_position_embeddings=self.max_position_embeddings,
                base=self.rope_theta,
            )
        else:
            scaling_type = self.config.rope_scaling["type"]
            scaling_factor = self.config.rope_scaling["factor"]
            if scaling_type == "linear":
                self.rotary_emb = ZhinaoLinearScalingRotaryEmbedding(
                    self.head_dim,
                    max_position_embeddings=self.max_position_embeddings,
                    scaling_factor=scaling_factor,
                    base=self.rope_theta,
                )
            elif scaling_type == "dynamic":
                self.rotary_emb = ZhinaoDynamicNTKScalingRotaryEmbedding(
                    self.head_dim,
                    max_position_embeddings=self.max_position_embeddings,
                    scaling_factor=scaling_factor,
                    base=self.rope_theta,
                )
            elif scaling_type == "ntk":
                self.rotary_emb = ZhinaoNTKScalingRotaryEmbedding(
                    self.head_dim,
                    max_position_embeddings=self.max_position_embeddings,
                    scaling_factor=scaling_factor,
                    base=self.rope_theta,
                )
            else:
                raise ValueError(f"Unknown RoPE scaling type {scaling_type}")

    def raw_attention(self, query_states, key_states, value_states, attention_mask):
        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)

        if attention_mask is not None:
            attn_weights = attn_weights + attention_mask

        # upcast attention to fp32
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
        attn_output = torch.matmul(attn_weights, value_states)

        attn_output = attn_output.transpose(1, 2).contiguous()

        return attn_output

    def flash_attention(self, query_states, key_states, value_states, attention_mask):
        # TODO: These transposes are quite inefficient, but Flash Attention requires the layout
        # [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
        # to be able to avoid many of these transpose/reshape/view.
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        batch_size, query_length = query_states.shape[0], query_states.shape[1]
        query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
            query_states, key_states, value_states, attention_mask, query_length
        )

        cu_seqlens_q, cu_seqlens_k = cu_seq_lens
        max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens

        attn_output_unpad = flash_attn_varlen_func(
            query_states,
            key_states,
            value_states,
            cu_seqlens_q=cu_seqlens_q,
            cu_seqlens_k=cu_seqlens_k,
            max_seqlen_q=max_seqlen_in_batch_q,
            max_seqlen_k=max_seqlen_in_batch_k,
            dropout_p=self.dropout,
            softmax_scale=None,
            causal=self.is_causal,
        )
        attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
        return attn_output

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        bsz, q_len, _ = hidden_states.size()

        mixed_x_layer = self.qkv_proj(hidden_states)
        new_tensor_shape = mixed_x_layer.size()[:-1] + \
            (self.num_key_value_heads, ((self.num_heads // self.num_key_value_heads + 2) * self.head_dim))
        mixed_x_layer = mixed_x_layer.view(*new_tensor_shape)
        query, key_states, value_states = torch.split(
            mixed_x_layer,
            [self.num_heads // self.num_key_value_heads * self.head_dim, self.head_dim, self.head_dim],
            dim=3
        )
        # [sq, b, ng, np/ng * hn] -> [sq, b, np, hn]
        query_states = query.contiguous().view(query.size(0), query.size(1), -1, self.head_dim)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            kv_seq_len += past_key_value[0].shape[-2]
        rotary_seq_len = max(kv_seq_len, position_ids[:, -1].max().item()) + 1
        cos, sin = self.rotary_emb(value_states, seq_len=rotary_seq_len)
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            # reuse k, v, self_attention
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)

        past_key_value = (key_states, value_states) if use_cache else None

        # repeat k/v heads if n_kv_heads < n_heads
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        # q, k, v: [b, n, s, h]
        # check attention mask
        if self.use_flash_attn:
            if attention_mask is not None and attention_mask.size() != (bsz, kv_seq_len):
                raise ValueError(f"Attention mask should be of size {(bsz, kv_seq_len)}, but is {attention_mask.size()}")
            attn_output = self.flash_attention(query_states, key_states, value_states, attention_mask)
        else:
            if attention_mask is not None and attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}")
            attn_output = self.raw_attention(query_states, key_states, value_states, attention_mask)

        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value

    # Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2._upad_input
    def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
        batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape

        # On the first iteration we need to properly re-create the padding mask
        # by slicing it on the proper place
        if kv_seq_len != attention_mask.shape[-1]:
            attention_mask_num_tokens = attention_mask.shape[-1]
            attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :]

        indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)

        key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
        value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)

        if query_length == kv_seq_len:
            query_layer = index_first_axis(
                query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k
            )
            cu_seqlens_q = cu_seqlens_k
            max_seqlen_in_batch_q = max_seqlen_in_batch_k
            indices_q = indices_k
        elif query_length == 1:
            max_seqlen_in_batch_q = 1
            cu_seqlens_q = torch.arange(
                batch_size + 1, dtype=torch.int32, device=query_layer.device
            )  # There is a memcpy here, that is very bad.
            indices_q = cu_seqlens_q[:-1]
            query_layer = query_layer.squeeze(1)
        else:
            # The -q_len: slice assumes left padding.
            attention_mask = attention_mask[:, -query_length:]

            query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
        return (
            query_layer,
            key_layer,
            value_layer,
            indices_q,
            (cu_seqlens_q, cu_seqlens_k),
            (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
|
501 |
+
)
|
502 |
+
|
503 |
+
|
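Aside: the fused qkv_proj output in the forward above is grouped per KV head — each of the num_key_value_heads groups packs num_heads // num_key_value_heads query heads plus one key head and one value head, which the torch.split on dim=3 separates. A minimal, self-contained sketch of that split with made-up sizes (illustration only, not the model's actual config):

import torch

# Hypothetical sizes for illustration only (not the real 360Zhinao2 config).
bsz, q_len = 2, 5
num_heads, num_kv_heads, head_dim = 8, 2, 16
group = num_heads // num_kv_heads  # query heads per KV head

qkv = torch.randn(bsz, q_len, (num_heads + 2 * num_kv_heads) * head_dim)

# Regroup the fused projection by KV head, as in ZhinaoAttention.forward:
# each group holds `group` query heads plus one key head and one value head.
qkv = qkv.view(bsz, q_len, num_kv_heads, (group + 2) * head_dim)
q, k, v = torch.split(qkv, [group * head_dim, head_dim, head_dim], dim=3)
q = q.contiguous().view(bsz, q_len, num_heads, head_dim)

print(q.shape, k.shape, v.shape)
# torch.Size([2, 5, 8, 16]) torch.Size([2, 5, 2, 16]) torch.Size([2, 5, 2, 16])
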
class ZhinaoDecoderLayer(nn.Module):
    def __init__(self, config: ZhinaoConfig):
        super().__init__()
        self.hidden_size = config.hidden_size

        self.self_attn = ZhinaoAttention(config=config)
        self.mlp = ZhinaoMLP(config)
        self.input_layernorm = ZhinaoRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = ZhinaoRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*):
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
                query_sequence_length, key_sequence_length)` if default attention is used.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
        """

        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        if use_cache:
            outputs += (present_key_value,)

        return outputs

class ZhinaoPreTrainedModel(PreTrainedModel):
    config_class = ZhinaoConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["ZhinaoDecoderLayer"]
    _skip_keys_device_placement = "past_key_values"

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ZhinaoModel):
            module.gradient_checkpointing = value

class ZhinaoModel(ZhinaoPreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`ZhinaoDecoderLayer`]

    Args:
        config: ZhinaoConfig
    """

    def __init__(self, config: ZhinaoConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size
        self.use_flash_attn = config.use_flash_attn

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList([ZhinaoDecoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.norm = ZhinaoRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
    def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
        # create causal mask
        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        combined_attention_mask = None
        if input_shape[-1] > 1:
            combined_attention_mask = _make_causal_mask(
                input_shape,
                inputs_embeds.dtype,
                device=inputs_embeds.device,
                past_key_values_length=past_key_values_length,
            )

        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
                inputs_embeds.device
            )
            combined_attention_mask = (
                expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
            )

        return combined_attention_mask

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
        elif input_ids is not None:
            batch_size, seq_length = input_ids.shape
        elif inputs_embeds is not None:
            batch_size, seq_length, _ = inputs_embeds.shape
        else:
            raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")

        seq_length_with_past = seq_length
        past_key_values_length = 0

        if past_key_values is not None:
            past_key_values_length = past_key_values[0][0].shape[2]
            seq_length_with_past = seq_length_with_past + past_key_values_length

        if position_ids is None:
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            position_ids = torch.arange(
                past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
            )
            position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
        else:
            position_ids = position_ids.view(-1, seq_length).long()

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)
        # embed positions
        if attention_mask is None:
            attention_mask = torch.ones(
                (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
            )

        # `(batch_size, 1, seq_length, seq_length)` if default attention is used
        if not self.use_flash_attn:
            attention_mask = self._prepare_decoder_attention_mask(
                attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
            )

        hidden_states = inputs_embeds

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = () if use_cache else None

        for idx, decoder_layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            past_key_value = past_key_values[idx] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        # None for past_key_value
                        return module(*inputs, past_key_value, output_attentions)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(decoder_layer),
                    hidden_states,
                    attention_mask,
                    position_ids,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_value,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                )

            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if not return_dict:
            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )

class ZhinaoForCausalLM(ZhinaoPreTrainedModel):
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = ZhinaoModel(config)
        self.vocab_size = config.vocab_size
        self.log_logit = config.log_logit
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        if config.bf16:
            self.model.bfloat16()
            self.lm_head.bfloat16()
        if config.fp16:
            self.model.half()
            self.lm_head.half()

        if config.use_flash_attn == "auto":
            if flash_attn_varlen_func:
                if config.bf16 or config.fp16:
                    logger.warn("Try importing flash-attention.")
                    config.use_flash_attn = True
                else:
                    config.use_flash_attn = False
                    logger.warn("Flash attention will be disabled because it does NOT support fp32.")
            else:
                config.use_flash_attn = False
                logger.warn("Please install FlashAttention first, e.g., with pip install flash-attn")

        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]
        logits = self.lm_head(hidden_states)

        # warning: casting the full-vocabulary logits to fp32 uses a lot of GPU memory
        logits = logits.float()

        # log_logit
        if self.log_logit:
            log_res = calc_logits_metric(logits)
            if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
                print("logits_log", log_res)

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
    ):
        if past_key_values:
            input_ids = input_ids[:, -1:]

        position_ids = kwargs.get("position_ids", None)
        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -1].unsqueeze(-1)

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs.update(
            {
                "position_ids": position_ids,
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "attention_mask": attention_mask,
            }
        )
        return model_inputs

    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
            )
        return reordered_past

    def generate(
        self,
        inputs: Optional[torch.Tensor] = None,
        generation_config: Optional[GenerationConfig] = None,
        streamer=None,
        **kwargs,
    ):
        logits_processor = None
        if generation_config is not None:
            repetition_penalty = kwargs.pop("repetition_penalty", generation_config.repetition_penalty)
            generation_config.repetition_penalty = 1.0

            if repetition_penalty > 1.0:
                warnings.warn(
                    "We highly recommend using OpenAI's frequency and presence penalty instead of the original "
                    "repetition penalty. The original repetition penalty penalizes prompt tokens, which may lead "
                    "to various potential issues. Therefore, your repetition penalty coefficient will be "
                    "transformed into frequency penalty and presence penalty.",
                    UserWarning,
                )
                presence_penalty = repetition_penalty - 1.0
                frequency_penalty = repetition_penalty - 1.0
                logits_processor = LogitsProcessorList(
                    [OutputRepetitionPenaltyLogitsProcessor(inputs.size(1), presence_penalty, frequency_penalty, 1.0)]
                )

        response = super().generate(
            inputs,
            generation_config=generation_config,
            logits_processor=logits_processor,
            streamer=streamer,
            **kwargs,
        )
        if generation_config is not None:
            generation_config.repetition_penalty = repetition_penalty
        return response

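The generate override above rewrites a classic repetition penalty into OpenAI-style presence and frequency penalties that apply only to generated tokens, using OutputRepetitionPenaltyLogitsProcessor (presumably defined in the bundled generation_utils.py). A sketch of how a caller would trigger that path — the repo id below is a placeholder for whichever checkpoint ships this file:

from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

MODEL_NAME = "qihoo360/360Zhinao2-7B-Base"  # placeholder repo id

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME, device_map="auto", torch_dtype="auto", trust_remote_code=True
).eval()

inputs = tokenizer("中国四大发明是", return_tensors="pt").input_ids.to(model.device)
gen_cfg = GenerationConfig.from_pretrained(MODEL_NAME)
gen_cfg.repetition_penalty = 1.05  # rewritten internally into presence/frequency penalties
out = model.generate(inputs, generation_config=gen_cfg)
print(tokenizer.decode(out[0], skip_special_tokens=True))
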
    def chat(
        self,
        tokenizer,
        messages: List[dict],
        system: str = "You are a helpful assistant.",
        stream=False,
        generation_config: Optional[GenerationConfig] = None):

        generation_config = generation_config or self.generation_config
        input_ids = make_context(
            model=self, tokenizer=tokenizer, messages=messages,
            system=system, max_new_tokens=generation_config.max_new_tokens
        )

        if stream:
            streamer = TextIterStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
            Thread(target=self.generate, kwargs=dict(
                inputs=input_ids, streamer=streamer,
                generation_config=generation_config,
            )).start()
            return streamer
        else:
            outputs = self.generate(input_ids, generation_config=generation_config)
            response = tokenizer.decode(outputs[0][len(input_ids[0]):], skip_special_tokens=True)
            return response

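A sketch of calling the chat helper in blocking and streaming modes; the repo id is again a placeholder, so substitute the chat checkpoint this file actually ships with:

from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

MODEL_NAME = "qihoo360/360Zhinao2-7B-Chat-4K"  # placeholder repo id

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME, device_map="auto", trust_remote_code=True
).eval()
generation_config = GenerationConfig.from_pretrained(MODEL_NAME)

messages = [{"role": "user", "content": "介绍一下刘德华"}]

# Blocking: returns the assistant reply as a string.
print(model.chat(tokenizer, messages, generation_config=generation_config))

# Streaming: returns a TextIterStreamer that yields decoded text chunks.
for chunk in model.chat(tokenizer, messages, stream=True, generation_config=generation_config):
    print(chunk, end="", flush=True)
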
class ZhinaoForSequenceClassification(ZhinaoPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = ZhinaoModel(config)
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SequenceClassifierOutputWithPast]:

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            sequence_lengths = -1
        else:
            if input_ids is not None:
                sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).long().argmax(-1) - 1).to(
                    logits.device
                )
            else:
                sequence_lengths = -1

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]

        loss = None
        if labels is not None:
            labels = labels.to(logits.device)
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(pooled_logits, labels)
        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

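Note that the pooled-logit index in the classification head above assumes right padding: it selects the position just before the first pad_token_id in each row, and when a row contains no padding, argmax returns 0, so 0 - 1 = -1 conveniently indexes the final position. A tiny sketch of that indexing on dummy ids:

import torch

pad_id = 0
input_ids = torch.tensor([[11, 12, 13, pad_id, pad_id],
                          [21, 22, 23, 24, 25]])  # second row has no padding

# First pad position minus one; -1 (last position) when no pad is present.
sequence_lengths = torch.eq(input_ids, pad_id).long().argmax(-1) - 1
print(sequence_lengths)  # tensor([ 2, -1]) -> last real token of each row
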
special_tokens_map.json
ADDED
@@ -0,0 +1,3 @@
{
  "pad_token": "<pad>"
}
tokenization_zhinao.py
ADDED
@@ -0,0 +1,257 @@
import os
import torch
import base64
import tiktoken
from typing import Collection, Optional, Dict, List, Set, Tuple, Union
from transformers.utils import PaddingStrategy
from transformers.tokenization_utils import PreTrainedTokenizer


PAT_STR = r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+"""


class SPTokenizer:
    def __init__(self, model_path):
        self.vocab_file = model_path
        self.pad_token = '<pad>'
        self.unk_token = '<unk>'
        self.mask_token = '<mask>'
        self.eod_token = '<eod>'
        self.eop_token = '<eop>'
        self.im_start_token = '<|im_start|>'
        self.im_end_token = '<|im_end|>'

        ## special_tokens
        self.SPECIAL_TOKENS = (
            self.pad_token,
            self.unk_token,
            self.mask_token,
            self.eod_token,
            self.eop_token,
            '[space2]', '[space3]', '[space4]', '[space8]',
            self.im_start_token, self.im_end_token
        )
        self.bulid_tokenizer()
        self.out = self.output_core_token()

        # each [spaceN] token stands for a run of N literal spaces
        self.token2strs = {
            "[space2]": "  ",
            "[space3]": "   ",
            "[space4]": "    ",
            "[space8]": "        ",
        }
        self.str2tokens = {v: k for k, v in self.token2strs.items()}
        self.sorted_strs = sorted(list(self.str2tokens.keys()),
                                  key=lambda x: len(x), reverse=True)

        ## skip_special_tokens
        self.decode_skip_special_tokens = [
            self.pad_token,
            self.unk_token,
            self.mask_token,
            self.eod_token,
            self.eop_token,
            self.im_start_token,
            self.im_end_token]
        self.decode_skip_special_tokens_ids = [self.convert_token_to_id(token) for token in self.decode_skip_special_tokens]

    def _load_tiktoken_bpe(self, tiktoken_bpe_file: str):
        with open(tiktoken_bpe_file, "rb") as f:
            contents = f.read()
        return {
            base64.b64decode(token): int(rank)
            for token, rank in (line.split() for line in contents.splitlines() if line)
        }

    def bulid_tokenizer(self):
        mergeable_ranks = self._load_tiktoken_bpe(self.vocab_file)
        special_tokens = {
            token: index
            for index, token in enumerate(
                self.SPECIAL_TOKENS, start=len(mergeable_ranks)
            )
        }
        encode = tiktoken.Encoding(
            "zhinao",
            pat_str=PAT_STR,
            mergeable_ranks=mergeable_ranks,
            special_tokens=special_tokens
        )
        decoder = {v: k for k, v in mergeable_ranks.items()}
        decoder.update({v: k for k, v in special_tokens.items()})
        decoder_token2id = {v: k for k, v in decoder.items()}

        self.tokenizer = encode
        self.decoder = decoder
        self.decoder_token2id = decoder_token2id
        self.num_tokens = len(mergeable_ranks) + len(self.SPECIAL_TOKENS)

    def output_core_token(self):
        """output special tokens"""
        out = {}
        for t in self.SPECIAL_TOKENS:
            out[t] = self.convert_token_to_id(t)
        return out

    def tokenize(
            self,
            text,
            allowed_special: Union[Set, str] = "all",
            disallowed_special: Union[Collection, str] = ()):
        tokens = []
        text = self.convert(text)
        for idx in self.tokenizer.encode(text, allowed_special=allowed_special, disallowed_special=disallowed_special):
            tokens.append(self.decoder[idx])
        return tokens

    def encode(self, text, allowed_special="all", disallowed_special=()):
        """text to id"""
        text = self.convert(text)
        return self.tokenizer.encode(text, allowed_special=allowed_special, disallowed_special=disallowed_special)

    def decode(self, ids, errors="replace"):
        """id to text"""
        text = self.tokenizer.decode(ids, errors=errors)
        return self.deconvert(text)

    def decode_tokens(self, tokens: List[str]) -> str:
        """
        Converts a sequence of tokens into a single string.
        """
        text = ""
        temp = b""
        for t in tokens:
            if isinstance(t, str):
                if temp:
                    text += temp.decode("utf-8", errors="ignore")
                    temp = b""
                text += t
            elif isinstance(t, bytes):
                temp += t
            else:
                raise TypeError("token should only be of type bytes or str")
        if temp:
            text += temp.decode("utf-8", errors="ignore")
        return self.deconvert(text)

    def convert_id_to_token(self, idx):
        return self.decoder[idx]

    def convert_token_to_id(self, token):
        return self.decoder_token2id[token]

    def convert(self, text):
        """Replace special characters in the text with their special tokens."""
        for k in ["[br]", "<br>"]:
            text = text.replace(k, "\n")
        for k in self.sorted_strs:
            if k in text:
                text = text.replace(k, self.str2tokens[k])
        return text

    def deconvert(self, text):
        """Restore the original characters in decoded text."""
        for t in self.token2strs:
            if t in text:
                text = text.replace(t, self.token2strs[t])
        return text


class ZhinaoTokenizer(PreTrainedTokenizer):
    vocab_files_names = {"vocab_file": "vocab/360.tiktoken"}
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, padding_side="left", clean_up_tokenization_spaces=False, **kwargs):
        self.name = "ZhinaoTokenizer"
        self.vocab_file = vocab_file
        self.tokenizer = SPTokenizer(model_path=vocab_file)
        # Drop the token kwargs this tokenizer manages itself. `pop` with a default is used
        # so a missing key does not abort the remaining pops (the previous try/except around
        # three bare pops stopped at the first missing key).
        kwargs.pop('eos_token', None)
        kwargs.pop('pad_token', None)
        kwargs.pop('unk_token', None)
        super().__init__(padding_side=padding_side, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs)
        self.pad_token_id = self.tokenizer.convert_token_to_id(self.tokenizer.pad_token)
        self.eod_id = self.tokenizer.convert_token_to_id(self.tokenizer.eod_token)
        self.im_start_id = self.tokenizer.convert_token_to_id(self.tokenizer.im_start_token)
        self.im_end_id = self.tokenizer.convert_token_to_id(self.tokenizer.im_end_token)

    @property
    def eop_token(self) -> str:
        return self.tokenizer.eop_token

    @property
    def eop_token_id(self):
        return self.tokenizer.convert_token_to_id(self.tokenizer.eop_token)

    @property
    def vocab_size(self):
        return self.tokenizer.num_tokens

    def get_vocab(self):
        """Returns vocab as a dict"""
        vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def tokenize(
        self,
        text: str,
        allowed_special: Union[Set, str] = "all",
        disallowed_special: Union[Collection, str] = (),
        split_special_tokens=False,
    ) -> List[Union[bytes, str]]:
        tokens = []
        for t in self.tokenizer.encode(
            text, allowed_special=allowed_special, disallowed_special=disallowed_special
        ):
            tokens.append(self.tokenizer.decoder[t])
        return tokens

    def _decode(
        self,
        token_ids: Union[int, List[int]],
        skip_special_tokens: bool = False,
        errors: str = "ignore",
        **kwargs,
    ) -> str:
        if isinstance(token_ids, int):
            token_ids = [token_ids]
        if skip_special_tokens:
            token_ids = [i for i in token_ids if i not in self.tokenizer.decode_skip_special_tokens_ids]
        return self.tokenizer.decode(token_ids, errors=errors)

    def _tokenize(self, text, **kwargs):
        raise NotImplementedError

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.tokenizer.convert_token_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """
        Converts a sequence of tokens into a single string.
        """
        return self.tokenizer.decode_tokens(tokens)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save only the vocabulary of the tokenizer (vocabulary)."""
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(save_directory, self.vocab_files_names["vocab_file"])
        else:
            vocab_file = save_directory

        with open(self.vocab_file, 'rb') as fin:
            proto_str = fin.read()

        os.makedirs(save_directory + "/vocab", exist_ok=True)
        with open(vocab_file, "wb") as writer:
            writer.write(proto_str)

        return (vocab_file,)
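A quick round-trip check of the tokenizer, which internally rewrites runs of 2/3/4/8 spaces into the [spaceN] tokens above and restores them on decode (placeholder repo id again):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "qihoo360/360Zhinao2-7B-Base",  # placeholder repo id
    trust_remote_code=True,
)

text = "def f(x):\n    return x  # indented with four spaces"
ids = tokenizer(text).input_ids
print(tokenizer.decode(ids, skip_special_tokens=True) == text)  # expected: True
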
tokenizer_config.json
ADDED
@@ -0,0 +1,19 @@
{
  "added_tokens_decoder": {},
  "auto_map": {
    "AutoTokenizer": [
      "tokenization_zhinao.ZhinaoTokenizer",
      null
    ]
  },
  "clean_up_tokenization_spaces": false,
  "do_lower_case": false,
  "eos_token": "<eod>",
  "model_max_length": 32768,
  "pad_token": "<pad>",
  "padding_side": "right",
  "remove_space": false,
  "tokenizer_class": "ZhinaoTokenizer",
  "unk_token": "<unk>",
  "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
}
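Since a chat_template is declared here, the standard apply_chat_template flow is available as an alternative to the model's custom chat method. A sketch (placeholder repo id):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "qihoo360/360Zhinao2-7B-Chat-4K",  # placeholder repo id
    trust_remote_code=True,
)

messages = [{"role": "user", "content": "你好"}]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# 你好<|im_end|>
# <|im_start|>assistant
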
vocab/360.tiktoken
ADDED
The diff for this file is too large to render. See raw diff.