tianleliphoebe committed on
Commit
ec0c335
1 Parent(s): da0909b

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes.

Files changed (50)
  1. .gitattributes +2 -0
  2. 2024-01-10-conv.json +16 -0
  3. LICENSE +201 -0
  4. README.md +374 -8
  5. assets/demo_narrow.gif +3 -0
  6. assets/qa_browser.png +0 -0
  7. assets/screenshot_cli.png +0 -0
  8. assets/screenshot_gui.png +0 -0
  9. assets/server_arch.png +0 -0
  10. assets/vicuna_logo.jpeg +0 -0
  11. controller.log +3 -0
  12. data/dummy_conversation.json +0 -0
  13. diffusion_infer.log +0 -0
  14. docker/Dockerfile +7 -0
  15. docker/docker-compose.yml +36 -0
  16. docs/arena.md +14 -0
  17. docs/awq.md +71 -0
  18. docs/commands/conv_release.md +38 -0
  19. docs/commands/data_cleaning.md +19 -0
  20. docs/commands/leaderboard.md +37 -0
  21. docs/commands/local_cluster.md +38 -0
  22. docs/commands/pypi.md +11 -0
  23. docs/commands/webserver.md +91 -0
  24. docs/dataset_release.md +6 -0
  25. docs/exllama_v2.md +63 -0
  26. docs/gptq.md +59 -0
  27. docs/langchain_integration.md +90 -0
  28. docs/model_support.md +92 -0
  29. docs/openai_api.md +151 -0
  30. docs/server_arch.md +2 -0
  31. docs/training.md +118 -0
  32. docs/vicuna_weights_version.md +97 -0
  33. docs/vllm_integration.md +25 -0
  34. docs/xFasterTransformer.md +90 -0
  35. fastchat/__init__.py +1 -0
  36. fastchat/__pycache__/__init__.cpython-310.pyc +0 -0
  37. fastchat/__pycache__/__init__.cpython-36.pyc +0 -0
  38. fastchat/__pycache__/__init__.cpython-39.pyc +0 -0
  39. fastchat/__pycache__/constants.cpython-39.pyc +0 -0
  40. fastchat/__pycache__/conversation.cpython-39.pyc +0 -0
  41. fastchat/__pycache__/utils.cpython-39.pyc +0 -0
  42. fastchat/constants.py +65 -0
  43. fastchat/conversation.py +1397 -0
  44. fastchat/data/__init__.py +0 -0
  45. fastchat/data/clean_sharegpt.py +217 -0
  46. fastchat/data/convert_alpaca.py +38 -0
  47. fastchat/data/extract_gpt4_only.py +32 -0
  48. fastchat/data/extract_single_round.py +29 -0
  49. fastchat/data/filter_wrong_format.py +44 -0
  50. fastchat/data/get_stats.py +82 -0
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ assets/demo_narrow.gif filter=lfs diff=lfs merge=lfs -text
+ controller.log filter=lfs diff=lfs merge=lfs -text
2024-01-10-conv.json ADDED
@@ -0,0 +1,16 @@
1
+ {"tstamp": 1704917757.8295, "type": "chat", "model": "imagenhub_LCM", "gen_params": {}, "start": 1704917393.1668, "finish": 1704917757.8295, "state": {"conv_id": "010693d1e27c42fa977e41bbf8d00470", "model_name": "imagenhub_LCM", "online_load": false}, "ip": "::1"}
2
+ {"tstamp": 1704917757.8839, "type": "chat", "model": "imagenhub_PixArtAlpha", "gen_params": {}, "start": 1704917467.339, "finish": 1704917757.8839, "state": {"conv_id": "3df2e8dc3a4e4178adca0e02119916ae", "model_name": "imagenhub_PixArtAlpha", "online_load": false}, "ip": "::1"}
3
+ {"tstamp": 1704919194.4066, "type": "chat", "model": "imagenhub_SDXLTurbo", "gen_params": {}, "start": 1704918934.4326, "finish": 1704919194.4066, "state": {"conv_id": "130e948a677f48689e879d0213e47e58", "model_name": "imagenhub_SDXLTurbo", "online_load": false}, "ip": "::1"}
4
+ {"tstamp": 1704919194.4267, "type": "chat", "model": "imagenhub_PixArtAlpha", "gen_params": {}, "start": 1704918965.3605, "finish": 1704919194.4267, "state": {"conv_id": "974b32e0f90b4e9e9cd8c15af23b3956", "model_name": "imagenhub_PixArtAlpha", "online_load": false}, "ip": "::1"}
5
+ {"tstamp": 1704919332.5045, "type": "chat", "model": "imagenhub_OpenJourney", "gen_params": {}, "start": 1704919292.7339, "finish": 1704919332.5045, "state": {"conv_id": "60350f300cc2493990f08a1538504918", "model_name": "imagenhub_OpenJourney", "online_load": false}, "ip": "::1"}
6
+ {"tstamp": 1704919332.5253, "type": "chat", "model": "imagenhub_SDXLTurbo", "gen_params": {}, "start": 1704919309.8761, "finish": 1704919332.5253, "state": {"conv_id": "0b0c7ab36e1e4943999be27a6d99d8f2", "model_name": "imagenhub_SDXLTurbo", "online_load": false}, "ip": "::1"}
7
+ {"tstamp": 1704919391.8389, "type": "chat", "model": "imagenhub_OpenJourney", "gen_params": {}, "start": 1704919353.4954, "finish": 1704919391.8389, "state": {"conv_id": "60350f300cc2493990f08a1538504918", "model_name": "imagenhub_OpenJourney", "online_load": false}, "ip": "::1"}
8
+ {"tstamp": 1704919391.8612, "type": "chat", "model": "imagenhub_SDXLTurbo", "gen_params": {}, "start": 1704919370.4979, "finish": 1704919391.8612, "state": {"conv_id": "0b0c7ab36e1e4943999be27a6d99d8f2", "model_name": "imagenhub_SDXLTurbo", "online_load": false}, "ip": "::1"}
9
+ {"tstamp": 1704919693.8946, "type": "chat", "model": "imagenhub_OpenJourney", "gen_params": {}, "start": 1704919656.4072, "finish": 1704919693.8946, "state": {"conv_id": "60350f300cc2493990f08a1538504918", "model_name": "imagenhub_OpenJourney", "online_load": false}, "ip": "::1"}
10
+ {"tstamp": 1704919693.9156, "type": "chat", "model": "imagenhub_SDXLTurbo", "gen_params": {}, "start": 1704919672.9026, "finish": 1704919693.9156, "state": {"conv_id": "0b0c7ab36e1e4943999be27a6d99d8f2", "model_name": "imagenhub_SDXLTurbo", "online_load": false}, "ip": "::1"}
11
+ {"tstamp": 1704920687.3046, "type": "chat", "model": "imagenhub_SD", "gen_params": {}, "start": 1704920386.2467, "finish": 1704920687.3046, "state": {"conv_id": "9dbb3b7ce4814e0984b429c914ee10cf", "model_name": "imagenhub_SD", "online_load": false}, "ip": "::1"}
12
+ {"tstamp": 1704920687.3257, "type": "chat", "model": "imagenhub_PixArtAlpha", "gen_params": {}, "start": 1704920412.8058, "finish": 1704920687.3257, "state": {"conv_id": "aab706c3a6a3432cb133f736712218b5", "model_name": "imagenhub_PixArtAlpha", "online_load": false}, "ip": "::1"}
13
+ {"tstamp": 1704922706.2213, "type": "chat", "model": "imagenhub_SD", "gen_params": {}, "start": 1704922630.0243, "finish": 1704922706.2213, "state": {"conv_id": "e1792f1925b545809736a6c394eee899", "model_name": "imagenhub_SD", "online_load": false}, "ip": "::1"}
14
+ {"tstamp": 1704922706.2456, "type": "chat", "model": "imagenhub_LCM", "gen_params": {}, "start": 1704922647.6262, "finish": 1704922706.2456, "state": {"conv_id": "d3209d40407746939789d2ee5dd0f4fa", "model_name": "imagenhub_LCM", "online_load": false}, "ip": "::1"}
15
+ {"tstamp": 1704923421.1464, "type": "rightvote", "models": ["", ""], "states": [{"conv_id": "e1792f1925b545809736a6c394eee899", "model_name": "imagenhub_SD", "online_load": false}, {"conv_id": "d3209d40407746939789d2ee5dd0f4fa", "model_name": "imagenhub_LCM", "online_load": false}], "ip": "::1"}
16
+ {"tstamp": 1704935561.9539, "type": "chat", "model": "imagenhub_LCM", "gen_params": {}, "start": 1704935484.7075, "finish": 1704935561.9539, "state": {"conv_id": "f1d3c338add6424d8fce2e82eb7fdfd4", "model_name": "imagenhub_LCM", "online_load": false}, "ip": "::1"}
LICENSE ADDED
@@ -0,0 +1,201 @@
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright [yyyy] [name of copyright owner]
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
README.md CHANGED
@@ -1,12 +1,378 @@
  ---
- title: Visual Arena
- emoji: 📉
- colorFrom: green
- colorTo: blue
  sdk: gradio
- sdk_version: 4.14.0
- app_file: app.py
- pinned: false
  ---

- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
1
  ---
2
+ title: visual-arena
3
+ app_file: fastchat/serve/gradio_web_server_multi.py
 
 
4
  sdk: gradio
5
+ sdk_version: 3.50.2
 
 
6
  ---
7
+ # Usage
8
 
9
+ 1. Install requirements
10
+ ```
11
+ brew install rust cmake
12
+ pip3 install --upgrade pip # enable PEP 660 support
13
+ pip3 install -e ".[model_worker,webui]"
14
+ ```
15
+ 2. Launch controller
16
+ ```
17
+ python3 -m fastchat.serve.controller
18
+ ```
19
+
20
+ 3. Launch web server
21
+ ```
22
+ python3 -m fastchat.serve.gradio_web_server_multi
23
+ ```
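Depending on your setup, you may also need to register one or more model workers with the controller before models show up in the web UI; see the "Serving with Web GUI" section below. A minimal sketch using the standard FastChat worker (the model path here is only an example; this Space's workers may differ):

```bash
# Launch a worker that loads a Hugging Face model and registers itself with the
# controller started in step 2 (default controller address http://localhost:21001).
python3 -m fastchat.serve.model_worker --model-path lmsys/vicuna-7b-v1.5
```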
24
+
25
+ # FastChat
26
+ | [**Demo**](https://chat.lmsys.org/) | [**Discord**](https://discord.gg/HSWAKCrnFx) | [**X**](https://x.com/lmsysorg) |
27
+
28
+ FastChat is an open platform for training, serving, and evaluating large language model based chatbots.
29
+ - FastChat powers Chatbot Arena (https://chat.lmsys.org/), serving over 5 million chat requests for 30+ LLMs.
30
+ - Arena has collected over 100K human votes from side-by-side LLM battles to compile an online [LLM Elo leaderboard](https://huggingface.co/spaces/lmsys/chatbot-arena-leaderboard).
31
+
32
+ FastChat's core features include:
33
+ - The training and evaluation code for state-of-the-art models (e.g., Vicuna, MT-Bench).
34
+ - A distributed multi-model serving system with web UI and OpenAI-compatible RESTful APIs.
35
+
36
+ ## News
37
+ - [2023/09] 🔥 We released **LMSYS-Chat-1M**, a large-scale real-world LLM conversation dataset. Read the [report](https://arxiv.org/abs/2309.11998).
38
+ - [2023/08] We released **Vicuna v1.5** based on Llama 2 with 4K and 16K context lengths. Download [weights](#vicuna-weights).
39
+ - [2023/07] We released **Chatbot Arena Conversations**, a dataset containing 33k conversations with human preferences. Download it [here](https://huggingface.co/datasets/lmsys/chatbot_arena_conversations).
40
+
41
+ <details>
42
+ <summary>More</summary>
43
+
44
+ - [2023/08] We released **LongChat v1.5** based on Llama 2 with 32K context lengths. Download [weights](#longchat).
45
+ - [2023/06] We introduced **MT-bench**, a challenging multi-turn question set for evaluating chatbots. Check out the blog [post](https://lmsys.org/blog/2023-06-22-leaderboard/).
46
+ - [2023/06] We introduced **LongChat**, our long-context chatbots and evaluation tools. Check out the blog [post](https://lmsys.org/blog/2023-06-29-longchat/).
47
+ - [2023/05] We introduced **Chatbot Arena** for battles among LLMs. Check out the blog [post](https://lmsys.org/blog/2023-05-03-arena).
48
+ - [2023/03] We released **Vicuna: An Open-Source Chatbot Impressing GPT-4 with 90% ChatGPT Quality**. Check out the blog [post](https://vicuna.lmsys.org).
49
+
50
+ </details>
51
+
52
+ <a href="https://chat.lmsys.org"><img src="assets/demo_narrow.gif" width="70%"></a>
53
+
54
+ ## Contents
55
+ - [Install](#install)
56
+ - [Model Weights](#model-weights)
57
+ - [Inference with Command Line Interface](#inference-with-command-line-interface)
58
+ - [Serving with Web GUI](#serving-with-web-gui)
59
+ - [API](#api)
60
+ - [Evaluation](#evaluation)
61
+ - [Fine-tuning](#fine-tuning)
62
+ - [Citation](#citation)
63
+
64
+ ## Install
65
+
66
+ ### Method 1: With pip
67
+
68
+ ```bash
69
+ pip3 install "fschat[model_worker,webui]"
70
+ ```
71
+
72
+ ### Method 2: From source
73
+
74
+ 1. Clone this repository and navigate to the FastChat folder
75
+ ```bash
76
+ git clone https://github.com/lm-sys/FastChat.git
77
+ cd FastChat
78
+ ```
79
+
80
+ If you are running on Mac:
81
+ ```bash
82
+ brew install rust cmake
83
+ ```
84
+
85
+ 2. Install Package
86
+ ```bash
87
+ pip3 install --upgrade pip # enable PEP 660 support
88
+ pip3 install -e ".[model_worker,webui]"
89
+ ```
90
+
91
+ ## Model Weights
92
+ ### Vicuna Weights
93
+ [Vicuna](https://lmsys.org/blog/2023-03-30-vicuna/) is based on Llama 2 and should be used under Llama's [model license](https://github.com/facebookresearch/llama/blob/main/LICENSE).
94
+
95
+ You can use the commands below to start chatting. It will automatically download the weights from Hugging Face repos.
96
+ See more command options and how to handle out-of-memory in the "Inference with Command Line Interface" section below.
97
+
98
+ **NOTE: `transformers>=4.31` is required for 16K versions.**
99
+
100
+ | Size | Chat Command | Hugging Face Repo |
101
+ | --- | --- | --- |
102
+ | 7B | `python3 -m fastchat.serve.cli --model-path lmsys/vicuna-7b-v1.5` | [lmsys/vicuna-7b-v1.5](https://huggingface.co/lmsys/vicuna-7b-v1.5) |
103
+ | 7B-16k | `python3 -m fastchat.serve.cli --model-path lmsys/vicuna-7b-v1.5-16k` | [lmsys/vicuna-7b-v1.5-16k](https://huggingface.co/lmsys/vicuna-7b-v1.5-16k) |
104
+ | 13B | `python3 -m fastchat.serve.cli --model-path lmsys/vicuna-13b-v1.5` | [lmsys/vicuna-13b-v1.5](https://huggingface.co/lmsys/vicuna-13b-v1.5) |
105
+ | 13B-16k | `python3 -m fastchat.serve.cli --model-path lmsys/vicuna-13b-v1.5-16k` | [lmsys/vicuna-13b-v1.5-16k](https://huggingface.co/lmsys/vicuna-13b-v1.5-16k) |
106
+ | 33B | `python3 -m fastchat.serve.cli --model-path lmsys/vicuna-33b-v1.3` | [lmsys/vicuna-33b-v1.3](https://huggingface.co/lmsys/vicuna-33b-v1.3) |
107
+
108
+ **Old weights**: see [docs/vicuna_weights_version.md](docs/vicuna_weights_version.md) for all versions of weights and their differences.
109
+
110
+ ### LongChat
111
+ We release [LongChat](https://lmsys.org/blog/2023-06-29-longchat/) models under Llama's [model license](https://github.com/facebookresearch/llama/blob/main/LICENSE).
112
+
113
+ | Size | Chat Command | Hugging Face Repo |
114
+ | --- | --- | --- |
115
+ | 7B | `python3 -m fastchat.serve.cli --model-path lmsys/longchat-7b-32k-v1.5` | [lmsys/longchat-7b-32k](https://huggingface.co/lmsys/longchat-7b-32k-v1.5) |
116
+
117
+ ### FastChat-T5
118
+ You can use the commands below to chat with FastChat-T5. It will automatically download the weights from Hugging Face repos.
119
+
120
+ | Size | Chat Command | Hugging Face Repo |
121
+ | --- | --- | --- |
122
+ | 3B | `python3 -m fastchat.serve.cli --model-path lmsys/fastchat-t5-3b-v1.0` | [lmsys/fastchat-t5-3b-v1.0](https://huggingface.co/lmsys/fastchat-t5-3b-v1.0) |
123
+
124
+ ## Inference with Command Line Interface
125
+
126
+ <a href="https://chat.lmsys.org"><img src="assets/screenshot_cli.png" width="70%"></a>
127
+
128
+ (Experimental Feature: You can specify `--style rich` to enable rich text output and better text streaming quality for some non-ASCII content. This may not work properly on certain terminals.)
129
+
130
+ #### Supported Models
131
+ FastChat supports a wide range of models, including
132
+ Llama 2, Vicuna, Alpaca, Baize, ChatGLM, Dolly, Falcon, FastChat-T5, GPT4All, Guanaco, MPT, OpenAssistant, OpenChat, RedPajama, StableLM, WizardLM, xDAN-AI, and more.
133
+
134
+ See a complete list of supported models and instructions to add a new model [here](docs/model_support.md).
135
+
136
+ #### Single GPU
137
+ The command below requires around 14GB of GPU memory for Vicuna-7B and 28GB of GPU memory for Vicuna-13B.
138
+ See the ["Not Enough Memory" section](#not-enough-memory) below if you do not have enough memory.
139
+ `--model-path` can be a local folder or a Hugging Face repo name.
140
+ ```
141
+ python3 -m fastchat.serve.cli --model-path lmsys/vicuna-7b-v1.5
142
+ ```
143
+
144
+ #### Multiple GPUs
145
+ You can use model parallelism to aggregate GPU memory from multiple GPUs on the same machine.
146
+ ```
147
+ python3 -m fastchat.serve.cli --model-path lmsys/vicuna-7b-v1.5 --num-gpus 2
148
+ ```
149
+
150
+ Tips:
151
+ Sometimes the "auto" device mapping strategy in huggingface/transformers does not perfectly balance the memory allocation across multiple GPUs.
152
+ You can use `--max-gpu-memory` to specify the maximum memory per GPU for storing model weights.
153
+ This allows it to allocate more memory for activations, so you can use longer context lengths or larger batch sizes. For example,
154
+
155
+ ```
156
+ python3 -m fastchat.serve.cli --model-path lmsys/vicuna-7b-v1.5 --num-gpus 2 --max-gpu-memory 8GiB
157
+ ```
158
+
159
+ #### CPU Only
160
+ This runs on the CPU only and does not require GPU. It requires around 30GB of CPU memory for Vicuna-7B and around 60GB of CPU memory for Vicuna-13B.
161
+ ```
162
+ python3 -m fastchat.serve.cli --model-path lmsys/vicuna-7b-v1.5 --device cpu
163
+ ```
164
+
165
+ Use Intel AI Accelerator AVX512_BF16/AMX to accelerate CPU inference.
166
+ ```
167
+ CPU_ISA=amx python3 -m fastchat.serve.cli --model-path lmsys/vicuna-7b-v1.5 --device cpu
168
+ ```
169
+
170
+ #### Metal Backend (Mac Computers with Apple Silicon or AMD GPUs)
171
+ Use `--device mps` to enable GPU acceleration on Mac computers (requires torch >= 2.0).
172
+ Use `--load-8bit` to turn on 8-bit compression.
173
+ ```
174
+ python3 -m fastchat.serve.cli --model-path lmsys/vicuna-7b-v1.5 --device mps --load-8bit
175
+ ```
176
+ Vicuna-7B can run on a 32GB M1 MacBook at 1-2 words per second.
177
+
178
+ #### Intel XPU (Intel Data Center and Arc A-Series GPUs)
179
+ Install the [Intel Extension for PyTorch](https://intel.github.io/intel-extension-for-pytorch/xpu/latest/tutorials/installation.html). Set the OneAPI environment variables:
180
+ ```
181
+ source /opt/intel/oneapi/setvars.sh
182
+ ```
183
+
184
+ Use `--device xpu` to enable XPU/GPU acceleration.
185
+ ```
186
+ python3 -m fastchat.serve.cli --model-path lmsys/vicuna-7b-v1.5 --device xpu
187
+ ```
188
+ Vicuna-7B can run on an Intel Arc A770 16GB.
189
+
190
+ #### Ascend NPU (Huawei AI Processor)
191
+ Install the [Ascend PyTorch Adapter](https://github.com/Ascend/pytorch). Set the CANN environment variables:
192
+ ```
193
+ source /usr/local/Ascend/ascend-toolkit/set_env.sh
194
+ ```
195
+
196
+ Use `--device npu` to enable NPU acceleration.
197
+ ```
198
+ python3 -m fastchat.serve.cli --model-path lmsys/vicuna-7b-v1.5 --device npu
199
+ ```
200
+ Vicuna-7B/13B can run on an Ascend 910B NPU 60GB.
201
+
202
+ #### Not Enough Memory
203
+ If you do not have enough memory, you can enable 8-bit compression by adding `--load-8bit` to commands above.
204
+ This can reduce memory usage by around half with slightly degraded model quality.
205
+ It is compatible with the CPU, GPU, and Metal backend.
206
+
207
+ Vicuna-13B with 8-bit compression can run on a single GPU with 16 GB of VRAM, like an Nvidia RTX 3090, RTX 4080, T4, V100 (16GB), or an AMD RX 6800 XT.
208
+
209
+ ```
210
+ python3 -m fastchat.serve.cli --model-path lmsys/vicuna-7b-v1.5 --load-8bit
211
+ ```
212
+
213
+ In addition to that, you can add `--cpu-offloading` to commands above to offload weights that don't fit on your GPU onto the CPU memory.
214
+ This requires 8-bit compression to be enabled and the bitsandbytes package to be installed, which is only available on Linux operating systems.
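For example, a sketch combining the two flags described above (assuming a Linux machine with bitsandbytes installed):

```bash
# 8-bit compression plus CPU offloading of the weights that do not fit in GPU memory
python3 -m fastchat.serve.cli --model-path lmsys/vicuna-13b-v1.5 --load-8bit --cpu-offloading
```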
215
+
216
+ #### More Platforms and Quantization
217
+ - For AMD GPU users, please install ROCm and [the ROCm version of PyTorch](https://pytorch.org/get-started/locally/) before you install FastChat. See also this [post](https://github.com/lm-sys/FastChat/issues/104#issuecomment-1613791563).
218
+ - FastChat supports ExLlama V2. See [docs/exllama_v2.md](/docs/exllama_v2.md).
219
+ - FastChat supports GPTQ 4bit inference with [GPTQ-for-LLaMa](https://github.com/qwopqwop200/GPTQ-for-LLaMa). See [docs/gptq.md](/docs/gptq.md).
220
+ - FastChat supports AWQ 4bit inference with [mit-han-lab/llm-awq](https://github.com/mit-han-lab/llm-awq). See [docs/awq.md](/docs/awq.md).
221
+ - [MLC LLM](https://mlc.ai/mlc-llm/), backed by [TVM Unity](https://github.com/apache/tvm/tree/unity) compiler, deploys Vicuna natively on phones, consumer-class GPUs and web browsers via Vulkan, Metal, CUDA and WebGPU.
222
+
223
+ ## Serving with Web GUI
224
+
225
+ <a href="https://chat.lmsys.org"><img src="assets/screenshot_gui.png" width="70%"></a>
226
+
227
+ To serve using the web UI, you need three main components: web servers that interface with users, model workers that host one or more models, and a controller to coordinate the webserver and model workers. You can learn more about the architecture [here](docs/server_arch.md).
228
+
229
+ Here are the commands to follow in your terminal:
230
+
231
+ #### Launch the controller
232
+ ```bash
233
+ python3 -m fastchat.serve.controller
234
+ ```
235
+
236
+ This controller manages the distributed workers.
237
+
238
+ #### Launch the model worker(s)
239
+ ```bash
240
+ python3 -m fastchat.serve.model_worker --model-path lmsys/vicuna-7b-v1.5
241
+ ```
242
+ Wait until the process finishes loading the model and you see "Uvicorn running on ...". The model worker will register itself with the controller.
243
+
244
+ To ensure that your model worker is connected to your controller properly, send a test message using the following command:
245
+ ```bash
246
+ python3 -m fastchat.serve.test_message --model-name vicuna-7b-v1.5
247
+ ```
248
+ You will see a short output.
249
+
250
+ #### Launch the Gradio web server
251
+ ```bash
252
+ python3 -m fastchat.serve.gradio_web_server
253
+ ```
254
+
255
+ This is the user interface that users will interact with.
256
+
257
+ By following these steps, you will be able to serve your models using the web UI. You can open your browser and chat with a model now.
258
+ If the models do not show up, try restarting the Gradio web server.
259
+
260
+ #### (Optional): Advanced Features, Scalability
261
+ - You can register multiple model workers to a single controller, which can be used for serving a single model with higher throughput or serving multiple models at the same time. When doing so, please allocate different GPUs and ports for different model workers.
262
+ ```
263
+ # worker 0
264
+ CUDA_VISIBLE_DEVICES=0 python3 -m fastchat.serve.model_worker --model-path lmsys/vicuna-7b-v1.5 --controller http://localhost:21001 --port 31000 --worker http://localhost:31000
265
+ # worker 1
266
+ CUDA_VISIBLE_DEVICES=1 python3 -m fastchat.serve.model_worker --model-path lmsys/fastchat-t5-3b-v1.0 --controller http://localhost:21001 --port 31001 --worker http://localhost:31001
267
+ ```
268
+ - You can also launch a multi-tab gradio server, which includes the Chatbot Arena tabs.
269
+ ```bash
270
+ python3 -m fastchat.serve.gradio_web_server_multi
271
+ ```
272
+ - The default model worker based on huggingface/transformers has great compatibility but can be slow. If you want high-throughput batched serving, you can try [vLLM integration](docs/vllm_integration.md).
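As a sketch, a vLLM-backed worker can be launched as a drop-in replacement for the default worker (the command mirrors docs/commands/local_cluster.md in this commit; see docs/vllm_integration.md for the full set of options):

```bash
# High-throughput batched serving with the vLLM backend instead of fastchat.serve.model_worker
python3 -m fastchat.serve.vllm_worker --model-path lmsys/vicuna-7b-v1.5
```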
273
+
274
+ #### (Optional): Advanced Features, Third Party UI
275
+ - If you want to host it with your own UI or a third-party UI, launch the OpenAI-compatible server, host it with a hosting service like ngrok, and enter the credentials appropriately.
276
+ - https://github.com/WongSaang/chatgpt-ui
277
+ - https://github.com/mckaywrigley/chatbot-ui
278
+ - Note that some third-party providers only offer the standard models (`gpt-3.5-turbo`, `gpt-4`, etc.), so you will have to add your own custom model name inside the code. [Here is an example of modifying a UI to work with any custom model name](https://github.com/ztjhz/BetterChatGPT/pull/461)
279
+
280
+
281
+ ## API
282
+ ### OpenAI-Compatible RESTful APIs & SDK
283
+ FastChat provides OpenAI-compatible APIs for its supported models, so you can use FastChat as a local drop-in replacement for OpenAI APIs.
284
+ The FastChat server is compatible with both [openai-python](https://github.com/openai/openai-python) library and cURL commands.
285
+ See [docs/openai_api.md](docs/openai_api.md).
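As an illustrative sketch (the launch flags and port below are taken from the docker-compose file in this commit; docs/openai_api.md is the authoritative reference), the API server can be started and queried like any OpenAI-style endpoint:

```bash
# Start the OpenAI-compatible server (a controller and at least one model worker must already be running)
python3 -m fastchat.serve.openai_api_server --host 0.0.0.0 --port 8000

# Query it with the standard chat completions request format
curl http://localhost:8000/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{"model": "vicuna-7b-v1.5", "messages": [{"role": "user", "content": "Hello! Who are you?"}]}'
```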
286
+
287
+ ### Hugging Face Generation APIs
288
+ See [fastchat/serve/huggingface_api.py](fastchat/serve/huggingface_api.py).
289
+
290
+ ### LangChain Integration
291
+ See [docs/langchain_integration](docs/langchain_integration.md).
292
+
293
+ ## Evaluation
294
+ We use MT-bench, a set of challenging multi-turn open-ended questions, to evaluate models.
295
+ To automate the evaluation process, we prompt strong LLMs like GPT-4 to act as judges and assess the quality of the models' responses.
296
+ See instructions for running MT-bench at [fastchat/llm_judge](fastchat/llm_judge).
297
+
298
+ MT-bench is the new recommended way to benchmark your models. If you are still looking for the old 80 questions used in the vicuna blog post, please go to [vicuna-blog-eval](https://github.com/lm-sys/vicuna-blog-eval).
299
+
300
+ ## Fine-tuning
301
+ ### Data
302
+
303
+ Vicuna is created by fine-tuning a Llama base model using approximately 125K user-shared conversations gathered from ShareGPT.com with public APIs. To ensure data quality, we convert the HTML back to markdown and filter out some inappropriate or low-quality samples. Additionally, we divide lengthy conversations into smaller segments that fit the model's maximum context length. For detailed instructions to clean the ShareGPT data, check out [here](docs/commands/data_cleaning.md).
304
+
305
+ We will not release the ShareGPT dataset. If you would like to try the fine-tuning code, you can run it with some dummy conversations in [dummy_conversation.json](data/dummy_conversation.json). You can follow the same format and plug in your own data.
306
+
307
+ ### Code and Hyperparameters
308
+ Our code is based on [Stanford Alpaca](https://github.com/tatsu-lab/stanford_alpaca) with additional support for multi-turn conversations.
309
+ We use hyperparameters similar to those of Stanford Alpaca.
310
+
311
+ | Hyperparameter | Global Batch Size | Learning rate | Epochs | Max length | Weight decay |
312
+ | --- | ---: | ---: | ---: | ---: | ---: |
313
+ | Vicuna-13B | 128 | 2e-5 | 3 | 2048 | 0 |
314
+
315
+ ### Fine-tuning Vicuna-7B with Local GPUs
316
+
317
+ - Install dependencies
318
+ ```bash
319
+ pip3 install -e ".[train]"
320
+ ```
321
+
322
+ - You can use the following command to train Vicuna-7B with 4 x A100 (40GB). Update `--model_name_or_path` with the actual path to Llama weights and `--data_path` with the actual path to data.
323
+ ```bash
324
+ torchrun --nproc_per_node=4 --master_port=20001 fastchat/train/train_mem.py \
325
+ --model_name_or_path meta-llama/Llama-2-7b-hf \
326
+ --data_path data/dummy_conversation.json \
327
+ --bf16 True \
328
+ --output_dir output_vicuna \
329
+ --num_train_epochs 3 \
330
+ --per_device_train_batch_size 2 \
331
+ --per_device_eval_batch_size 2 \
332
+ --gradient_accumulation_steps 16 \
333
+ --evaluation_strategy "no" \
334
+ --save_strategy "steps" \
335
+ --save_steps 1200 \
336
+ --save_total_limit 10 \
337
+ --learning_rate 2e-5 \
338
+ --weight_decay 0. \
339
+ --warmup_ratio 0.03 \
340
+ --lr_scheduler_type "cosine" \
341
+ --logging_steps 1 \
342
+ --fsdp "full_shard auto_wrap" \
343
+ --fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' \
344
+ --tf32 True \
345
+ --model_max_length 2048 \
346
+ --gradient_checkpointing True \
347
+ --lazy_preprocess True
348
+ ```
349
+
350
+ Tips:
351
+ - If you are using V100 which is not supported by FlashAttention, you can use the [memory-efficient attention](https://arxiv.org/abs/2112.05682) implemented in [xFormers](https://github.com/facebookresearch/xformers). Install xformers and replace `fastchat/train/train_mem.py` above with [fastchat/train/train_xformers.py](fastchat/train/train_xformers.py).
352
+ - If you meet out-of-memory due to "FSDP Warning: When using FSDP, it is efficient and recommended... ", see solutions [here](https://github.com/huggingface/transformers/issues/24724#issuecomment-1645189539).
353
+ - If you meet out-of-memory during model saving, see solutions [here](https://github.com/pytorch/pytorch/issues/98823).
354
+ - To turn on logging to popular experiment tracking tools such as Tensorboard, MLFlow or Weights & Biases, use the `report_to` argument, e.g. pass `--report_to wandb` to turn on logging to Weights & Biases.
355
+
356
+ ### Other models, platforms and LoRA support
357
+ More instructions to train other models (e.g., FastChat-T5) and use LoRA are in [docs/training.md](docs/training.md).
358
+
359
+ ### Fine-tuning on Any Cloud with SkyPilot
360
+ [SkyPilot](https://github.com/skypilot-org/skypilot) is a framework built by UC Berkeley for easily and cost-effectively running ML workloads on any cloud (AWS, GCP, Azure, Lambda, etc.).
361
+ Find SkyPilot documentation [here](https://github.com/skypilot-org/skypilot/tree/master/llm/vicuna) on using managed spot instances to train Vicuna and save on your cloud costs.
362
+
363
+ ## Citation
364
+ The code (training, serving, and evaluation) in this repository is mostly developed for or derived from the paper below.
365
+ Please cite it if you find the repository helpful.
366
+
367
+ ```
368
+ @misc{zheng2023judging,
369
+ title={Judging LLM-as-a-judge with MT-Bench and Chatbot Arena},
370
+ author={Lianmin Zheng and Wei-Lin Chiang and Ying Sheng and Siyuan Zhuang and Zhanghao Wu and Yonghao Zhuang and Zi Lin and Zhuohan Li and Dacheng Li and Eric. P Xing and Hao Zhang and Joseph E. Gonzalez and Ion Stoica},
371
+ year={2023},
372
+ eprint={2306.05685},
373
+ archivePrefix={arXiv},
374
+ primaryClass={cs.CL}
375
+ }
376
+ ```
377
+
378
+ We are also planning to add more of our research to this repository.
assets/demo_narrow.gif ADDED

Git LFS Details

  • SHA256: 6e02d6a8fc6820f784105b3515b595730cc74542b4cf3f2a84a4361a0db17766
  • Pointer size: 133 Bytes
  • Size of remote file: 12.3 MB
assets/qa_browser.png ADDED
assets/screenshot_cli.png ADDED
assets/screenshot_gui.png ADDED
assets/server_arch.png ADDED
assets/vicuna_logo.jpeg ADDED
controller.log ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3ede59662a8052ed939e3bdb888ef1cfc85e94d18e4c3c02221edd4fd2de3f42
+ size 12210332
data/dummy_conversation.json ADDED
The diff for this file is too large to render.
 
diffusion_infer.log ADDED
The diff for this file is too large to render.
 
docker/Dockerfile ADDED
@@ -0,0 +1,7 @@
+ FROM nvidia/cuda:12.2.0-runtime-ubuntu20.04
+
+ RUN apt-get update -y && apt-get install -y python3.9 python3.9-distutils curl
+ RUN curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
+ RUN python3.9 get-pip.py
+ RUN pip3 install fschat
+ RUN pip3 install fschat[model_worker,webui] pydantic==1.10.13
docker/docker-compose.yml ADDED
@@ -0,0 +1,36 @@
1
+ version: "3.9"
2
+
3
+ services:
4
+ fastchat-controller:
5
+ build:
6
+ context: .
7
+ dockerfile: Dockerfile
8
+ image: fastchat:latest
9
+ ports:
10
+ - "21001:21001"
11
+ entrypoint: ["python3.9", "-m", "fastchat.serve.controller", "--host", "0.0.0.0", "--port", "21001"]
12
+ fastchat-model-worker:
13
+ build:
14
+ context: .
15
+ dockerfile: Dockerfile
16
+ volumes:
17
+ - huggingface:/root/.cache/huggingface
18
+ image: fastchat:latest
19
+ deploy:
20
+ resources:
21
+ reservations:
22
+ devices:
23
+ - driver: nvidia
24
+ count: 1
25
+ capabilities: [gpu]
26
+ entrypoint: ["python3.9", "-m", "fastchat.serve.model_worker", "--model-names", "${FASTCHAT_WORKER_MODEL_NAMES:-vicuna-7b-v1.5}", "--model-path", "${FASTCHAT_WORKER_MODEL_PATH:-lmsys/vicuna-7b-v1.5}", "--worker-address", "http://fastchat-model-worker:21002", "--controller-address", "http://fastchat-controller:21001", "--host", "0.0.0.0", "--port", "21002"]
27
+ fastchat-api-server:
28
+ build:
29
+ context: .
30
+ dockerfile: Dockerfile
31
+ image: fastchat:latest
32
+ ports:
33
+ - "8000:8000"
34
+ entrypoint: ["python3.9", "-m", "fastchat.serve.openai_api_server", "--controller-address", "http://fastchat-controller:21001", "--host", "0.0.0.0", "--port", "8000"]
35
+ volumes:
36
+ huggingface:
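A usage sketch for this compose file: the worker's model can be overridden through the two environment variables referenced in its entrypoint, and the services can then be brought up together.

```bash
# Build the image and start the controller, model worker, and OpenAI-compatible API server
FASTCHAT_WORKER_MODEL_NAMES=vicuna-7b-v1.5 \
FASTCHAT_WORKER_MODEL_PATH=lmsys/vicuna-7b-v1.5 \
docker compose up -d --build
# The API server is then reachable on port 8000 and the controller on port 21001
```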
docs/arena.md ADDED
@@ -0,0 +1,14 @@
+ # Chatbot Arena
+ Chatbot Arena is an LLM benchmark platform featuring anonymous, randomized battles, available at https://chat.lmsys.org.
+ We invite the entire community to join this benchmarking effort by contributing your votes and models.
+
+ ## How to add a new model
+ If you want to see a specific model in the arena, you can follow the methods below.
+
+ - Method 1: Hosted by LMSYS.
+ 1. Contribute the code to support this model in FastChat by submitting a pull request. See [instructions](model_support.md#how-to-support-a-new-model).
+ 2. After the model is supported, we will try to schedule some compute resources to host the model in the arena. However, due to the limited resources we have, we may not be able to serve every model. We will select the models based on popularity, quality, diversity, and other factors.
+
+ - Method 2: Hosted by 3rd party API providers or yourself.
+ 1. If you have a model hosted by a 3rd party API provider or yourself, please give us an API endpoint. We prefer OpenAI-compatible APIs, so we can reuse our [code](https://github.com/lm-sys/FastChat/blob/33dca5cf12ee602455bfa9b5f4790a07829a2db7/fastchat/serve/gradio_web_server.py#L333-L358) for calling OpenAI models.
+ 2. You can use FastChat's OpenAI API [server](openai_api.md) to serve your model with OpenAI-compatible APIs and provide us with the endpoint.
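For Method 2, a minimal sketch of exposing your own model behind an OpenAI-compatible endpoint with FastChat (the commands mirror the docker-compose entrypoints in this commit; see openai_api.md for details):

```bash
# Run a controller, a worker for your model, and the OpenAI-compatible API server,
# then share the resulting endpoint (e.g., through a tunnel such as ngrok) with us.
python3 -m fastchat.serve.controller &
python3 -m fastchat.serve.model_worker --model-path /path/to/your-model &
python3 -m fastchat.serve.openai_api_server --host 0.0.0.0 --port 8000
```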
docs/awq.md ADDED
@@ -0,0 +1,71 @@
1
+ # AWQ 4bit Inference
2
+
3
+ We integrated [AWQ](https://github.com/mit-han-lab/llm-awq) into FastChat to provide **efficient and accurate** 4bit LLM inference.
4
+
5
+ ## Install AWQ
6
+
7
+ Setup environment (please refer to [this link](https://github.com/mit-han-lab/llm-awq#install) for more details):
8
+ ```bash
9
+ conda create -n fastchat-awq python=3.10 -y
10
+ conda activate fastchat-awq
11
+ # cd /path/to/FastChat
12
+ pip install --upgrade pip # enable PEP 660 support
13
+ pip install -e . # install fastchat
14
+
15
+ git clone https://github.com/mit-han-lab/llm-awq repositories/llm-awq
16
+ cd repositories/llm-awq
17
+ pip install -e . # install awq package
18
+
19
+ cd awq/kernels
20
+ python setup.py install # install awq CUDA kernels
21
+ ```
22
+
23
+ ## Chat with the CLI
24
+
25
+ ```bash
26
+ # Download quantized model from huggingface
27
+ # Make sure you have git-lfs installed (https://git-lfs.com)
28
+ git lfs install
29
+ git clone https://huggingface.co/mit-han-lab/vicuna-7b-v1.3-4bit-g128-awq
30
+
31
+ # You can specify which quantized model to use by setting --awq-ckpt
32
+ python3 -m fastchat.serve.cli \
33
+ --model-path models/vicuna-7b-v1.3-4bit-g128-awq \
34
+ --awq-wbits 4 \
35
+ --awq-groupsize 128
36
+ ```
37
+
38
+ ## Benchmark
39
+
40
+ * Through **4-bit weight quantization**, AWQ helps to run larger language models within device memory restrictions and noticeably accelerates token generation. All benchmarks are done with group_size 128.
41
+
42
+ * Benchmark on NVIDIA RTX A6000:
43
+
44
+ | Model | Bits | Max Memory (MiB) | Speed (ms/token) | AWQ Speedup |
45
+ | --------------- | ---- | ---------------- | ---------------- | ----------- |
46
+ | vicuna-7b | 16 | 13543 | 26.06 | / |
47
+ | vicuna-7b | 4 | 5547 | 12.43 | 2.1x |
48
+ | llama2-7b-chat | 16 | 13543 | 27.14 | / |
49
+ | llama2-7b-chat | 4 | 5547 | 12.44 | 2.2x |
50
+ | vicuna-13b | 16 | 25647 | 44.91 | / |
51
+ | vicuna-13b | 4 | 9355 | 17.30 | 2.6x |
52
+ | llama2-13b-chat | 16 | 25647 | 47.28 | / |
53
+ | llama2-13b-chat | 4 | 9355 | 20.28 | 2.3x |
54
+
55
+ * NVIDIA RTX 4090:
56
+
57
+ | Model | AWQ 4bit Speed (ms/token) | FP16 Speed (ms/token) | AWQ Speedup |
58
+ | --------------- | ------------------------- | --------------------- | ----------- |
59
+ | vicuna-7b | 8.61 | 19.09 | 2.2x |
60
+ | llama2-7b-chat | 8.66 | 19.97 | 2.3x |
61
+ | vicuna-13b | 12.17 | OOM | / |
62
+ | llama2-13b-chat | 13.54 | OOM | / |
63
+
64
+ * NVIDIA Jetson Orin:
65
+
66
+ | Model | AWQ 4bit Speed (ms/token) | FP16 Speed (ms/token) | AWQ Speedup |
67
+ | --------------- | ------------------------- | --------------------- | ----------- |
68
+ | vicuna-7b | 65.34 | 93.12 | 1.4x |
69
+ | llama2-7b-chat | 75.11 | 104.71 | 1.4x |
70
+ | vicuna-13b | 115.40 | OOM | / |
71
+ | llama2-13b-chat | 136.81 | OOM | / |
docs/commands/conv_release.md ADDED
@@ -0,0 +1,38 @@
1
+ ## Chatbot Arena Conversations
2
+
3
+ 1. Gather battles
4
+ ```
5
+ python3 clean_battle_data.py --max-num 10 --mode conv_release
6
+ ```
7
+
8
+ 2. Tag OpenAI moderation
9
+ ```
10
+ python3 tag_openai_moderation.py --in clean_battle_conv_20230814.json
11
+ ```
12
+
13
+ 3. Clean PII
14
+
15
+ 4. Filter additional blocked words
16
+
17
+ ```
18
+ python3 filter_bad_conv.py --in clean_battle_conv_20230630_tagged_v1_pii.json
19
+ ```
20
+
21
+ 5. Add additional toxicity tag
22
+
23
+
24
+ ## All Conversations
25
+
26
+ 1. Gather chats
27
+ ```
28
+ python3 clean_chat_data.py
29
+ ```
30
+
31
+ 2. Sample
32
+ ```
33
+ python3 conv_release_scripts/sample.py
34
+ ```
35
+
36
+
37
+ ## Prompt distribution
38
+
docs/commands/data_cleaning.md ADDED
@@ -0,0 +1,19 @@
1
+ ## Data cleaning
2
+
3
+ ## Requirements
4
+ ```
5
+ pip3 install bs4 markdownify
6
+ pip3 install polyglot pyicu pycld2
7
+ ```
8
+
9
+ ## Steps
10
+ ```
11
+ # Convert html to markdown
12
+ python3 -m fastchat.data.clean_sharegpt --in sharegpt_html.json --out sharegpt_clean.json
13
+
14
+ # Keep or remove specific languages
15
+ python3 -m fastchat.data.optional_clean --in sharegpt_clean.json --out sharegpt_clean_lang.json --skip-lang SOME_LANGUAGE_CODE
16
+
17
+ # Split long conversations
18
+ python3 -m fastchat.data.split_long_conversation --in sharegpt_clean_lang.json --out sharegpt_clean_lang_split.json --model-name /home/ubuntu/model_weights/llama-7b/
19
+ ```
docs/commands/leaderboard.md ADDED
@@ -0,0 +1,37 @@
1
+ ### Get logs
2
+ ```
3
+ gsutil -m rsync -r gs://fastchat_logs ~/fastchat_logs/
4
+ ```
5
+
6
+ ### Clean battle data
7
+ ```
8
+ cd ~/FastChat/fastchat/serve/monitor
9
+ python3 clean_battle_data.py
10
+ ```
11
+
12
+ ### Run Elo analysis
13
+ ```
14
+ python3 elo_analysis.py --clean-battle-file clean_battle_20230905.json
15
+ ```
16
+
17
+ ### Copy files to HF space
18
+ 1. update plots
19
+ ```
20
+ scp atlas:/data/lmzheng/FastChat/fastchat/serve/monitor/elo_results_20230905.pkl .
21
+ ```
22
+
23
+ 2. update table
24
+ ```
25
+ wget https://huggingface.co/spaces/lmsys/chatbot-arena-leaderboard/raw/main/leaderboard_table_20230905.csv
26
+ ```
27
+
28
+ ### Update files on webserver
29
+ ```
30
+ DATE=20231002
31
+
32
+ rm -rf elo_results.pkl leaderboard_table.csv
33
+ wget https://huggingface.co/spaces/lmsys/chatbot-arena-leaderboard/resolve/main/elo_results_$DATE.pkl
34
+ wget https://huggingface.co/spaces/lmsys/chatbot-arena-leaderboard/resolve/main/leaderboard_table_$DATE.csv
35
+ ln -s leaderboard_table_$DATE.csv leaderboard_table.csv
36
+ ln -s elo_results_$DATE.pkl elo_results.pkl
37
+ ```
docs/commands/local_cluster.md ADDED
@@ -0,0 +1,38 @@
1
+ ### Local GPU cluster
2
+ node-01
3
+ ```
4
+ python3 -m fastchat.serve.controller --host 0.0.0.0 --port 10002
5
+
6
+ CUDA_VISIBLE_DEVICES=0 python3 -m fastchat.serve.vllm_worker --model-path lmsys/vicuna-13b-v1.5 --model-name vicuna-13b --controller http://node-01:10002 --host 0.0.0.0 --port 31000 --worker-address http://$(hostname):31000
7
+ CUDA_VISIBLE_DEVICES=1 python3 -m fastchat.serve.vllm_worker --model-path lmsys/vicuna-13b-v1.5 --model-name vicuna-13b --controller http://node-01:10002 --host 0.0.0.0 --port 31001 --worker-address http://$(hostname):31001
8
+
9
+ CUDA_VISIBLE_DEVICES=2,3 ray start --head
10
+ python3 -m fastchat.serve.vllm_worker --model-path lmsys/vicuna-33b-v1.3 --model-name vicuna-33b --controller http://node-01:10002 --host 0.0.0.0 --port 31002 --worker-address http://$(hostname):31002 --num-gpus 2
11
+ ```
12
+
13
+ node-02
14
+ ```
15
+ CUDA_VISIBLE_DEVICES=0 python3 -m fastchat.serve.vllm_worker --model-path meta-llama/Llama-2-13b-chat-hf --model-name llama-2-13b-chat --controller http://node-01:10002 --host 0.0.0.0 --port 31000 --worker-address http://$(hostname):31000 --tokenizer meta-llama/Llama-2-7b-chat-hf
16
+ CUDA_VISIBLE_DEVICES=1 python3 -m fastchat.serve.vllm_worker --model-path meta-llama/Llama-2-13b-chat-hf --model-name llama-2-13b-chat --controller http://node-01:10002 --host 0.0.0.0 --port 31001 --worker-address http://$(hostname):31001 --tokenizer meta-llama/Llama-2-7b-chat-hf
17
+ CUDA_VISIBLE_DEVICES=2 python3 -m fastchat.serve.vllm_worker --model-path meta-llama/Llama-2-7b-chat-hf --model-name llama-2-7b-chat --controller http://node-01:10002 --host 0.0.0.0 --port 31002 --worker-address http://$(hostname):31002 --tokenizer meta-llama/Llama-2-7b-chat-hf
18
+ CUDA_VISIBLE_DEVICES=3 python3 -m fastchat.serve.vllm_worker --model-path WizardLM/WizardLM-13B-V1.1 --model-name wizardlm-13b --controller http://node-01:10002 --host 0.0.0.0 --port 31003 --worker-address http://$(hostname):31003
19
+ ```
20
+
21
+ node-03
22
+ ```
23
+ python3 -m fastchat.serve.vllm_worker --model-path mosaicml/mpt-30b-chat --controller http://node-01:10002 --host 0.0.0.0 --port 31000 --worker-address http://$(hostname):31000 --num-gpus 2
24
+ python3 -m fastchat.serve.vllm_worker --model-path timdettmers/guanaco-33b-merged --model-name guanaco-33b --controller http://node-01:10002 --host 0.0.0.0 --port 31002 --worker-address http://$(hostname):31002 --num-gpus 2 --tokenizer hf-internal-testing/llama-tokenizer
25
+ ```
26
+
27
+ node-04
28
+ ```
29
+ CUDA_VISIBLE_DEVICES=0 python3 -m fastchat.serve.multi_model_worker --model-path ~/model_weights/RWKV-4-Raven-14B-v12-Eng98%25-Other2%25-20230523-ctx8192.pth --model-name RWKV-4-Raven-14B --model-path lmsys/fastchat-t5-3b-v1.0 --model-name fastchat-t5-3b --controller http://node-01:10002 --host 0.0.0.0 --port 31000 --worker http://$(hostname):31000 --limit 4
30
+ CUDA_VISIBLE_DEVICES=1 python3 -m fastchat.serve.multi_model_worker --model-path OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5 --model-name oasst-pythia-12b --model-path mosaicml/mpt-7b-chat --model-name mpt-7b-chat --controller http://node-01:10002 --host 0.0.0.0 --port 31001 --worker http://$(hostname):31001 --limit 4
31
+ CUDA_VISIBLE_DEVICES=2 python3 -m fastchat.serve.multi_model_worker --model-path lmsys/vicuna-7b-v1.5 --model-name vicuna-7b --model-path THUDM/chatglm-6b --model-name chatglm-6b --controller http://node-01:10002 --host 0.0.0.0 --port 31002 --worker http://$(hostname):31002 --limit 4
32
+ CUDA_VISIBLE_DEVICES=3 python3 -m fastchat.serve.vllm_worker --model-path ~/model_weights/alpaca-13b --controller http://node-01:10002 --host 0.0.0.0 --port 31003 --worker-address http://$(hostname):31003
33
+ ```
34
+
35
+ test
36
+ ```
37
+ python3 -m fastchat.serve.test_message --model vicuna-13b --controller http://localhost:10002
38
+ ```
docs/commands/pypi.md ADDED
@@ -0,0 +1,11 @@
+ ### Requirements
+ ```
+ python3 -m pip install twine
+ python3 -m pip install --upgrade pip
+ pip3 install build
+ ```
+
+ ### Upload
+ ```
+ bash scripts/upload_pypi.sh
+ ```
docs/commands/webserver.md ADDED
@@ -0,0 +1,91 @@
1
+ ### Install
2
+ ```
3
+ sudo apt update
4
+ sudo apt install tmux htop
5
+
6
+ wget https://repo.anaconda.com/archive/Anaconda3-2022.10-Linux-x86_64.sh
7
+ bash Anaconda3-2022.10-Linux-x86_64.sh
8
+
9
+ conda create -n fastchat python=3.9
10
+ conda activate fastchat
11
+
12
+ git clone https://github.com/lm-sys/FastChat.git
13
+ cd FastChat
14
+ pip3 install -e .
15
+ ```
16
+
17
+
18
+ ### Launch servers
19
+ ```
20
+ cd fastchat_logs/controller
21
+ python3 -m fastchat.serve.controller --host 0.0.0.0 --port 21001
22
+ python3 -m fastchat.serve.register_worker --controller http://localhost:21001 --worker-name https://
23
+ python3 -m fastchat.serve.test_message --model vicuna-13b --controller http://localhost:21001
24
+
25
+ cd fastchat_logs/server0
26
+
27
+ export OPENAI_API_KEY=
28
+ export ANTHROPIC_API_KEY=
29
+
30
+ python3 -m fastchat.serve.gradio_web_server_multi --controller http://localhost:21001 --concurrency 10 --add-chatgpt --add-claude --add-palm --anony-only --elo ~/elo_results/elo_results.pkl --leaderboard-table-file ~/elo_results/leaderboard_table.csv --register ~/elo_results/register_oai_models.json --show-terms
31
+
32
+ python3 backup_logs.py
33
+ ```
34
+
35
+
36
+ ### Check the launch time
37
+ ```
38
+ for i in $(seq 0 11); do cat fastchat_logs/server$i/gradio_web_server.log | grep "Running on local URL" | tail -n 1; done
39
+ ```
40
+
41
+
42
+ ### Increase the limit of max open files
43
+ One process (no reboot needed)
44
+ ```
45
+ sudo prlimit --nofile=1048576:1048576 --pid=$id
46
+
47
+ for id in $(ps -ef | grep gradio_web_server | awk '{print $2}'); do echo $id; prlimit --nofile=1048576:1048576 --pid=$id; done
48
+ ```
49
+
50
+ System-wide (requires reboot): add the lines below to `/etc/security/limits.conf`
51
+ ```
52
+ * hard nofile 65535
53
+ * soft nofile 65535
54
+ ```
55
+
56
+
57
+ ### Gradio edit (3.35.2)
58
+ 1. gtag and canvas
59
+ ```
60
+ vim /home/vicuna/anaconda3/envs/fastchat/lib/python3.9/site-packages/gradio/templates/frontend/index.html
61
+ ```
62
+
63
+ ```
64
+ <!-- Google tag (gtag.js) -->
65
+ <script async src="https://www.googletagmanager.com/gtag/js?id=G-K6D24EE9ED"></script><script>
66
+ window.dataLayer = window.dataLayer || [];
67
+ function gtag(){dataLayer.push(arguments);}
68
+ gtag('js', new Date());
69
+ gtag('config', 'G-K6D24EE9ED');
70
+ window.__gradio_mode__ = "app";
71
+ </script>
72
+ <script src="https://cdnjs.cloudflare.com/ajax/libs/html2canvas/1.4.1/html2canvas.min.js"></script>
73
+ ```
74
+
75
+ 2. deprecation warnings
76
+ ```
77
+ vim /home/vicuna/anaconda3/envs/fastchat/lib/python3.9/site-packages/gradio/deprecation.py
78
+ ```
79
+
80
+ ```
81
+ def check_deprecated_parameters(
82
+ ```
83
+
84
+ 3. Loading
85
+ ```
86
+ vim /home/vicuna/anaconda3/envs/fastchat/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-188ef5e8.js
87
+ ```
88
+
89
+ ```
90
+ %s/"Loading..."/"Loading...(Please refresh if it takes more than 30 seconds)"/g
91
+ ```
docs/dataset_release.md ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ ## Datasets
2
+ We release the following datasets based on our projects and websites.
3
+
4
+ - [LMSYS-Chat-1M: A Large-Scale Real-World LLM Conversation Dataset](https://huggingface.co/datasets/lmsys/lmsys-chat-1m)
5
+ - [Chatbot Arena Conversation Dataset](https://huggingface.co/datasets/lmsys/chatbot_arena_conversations)
6
+ - [MT-bench Human Annotation Dataset](https://huggingface.co/datasets/lmsys/mt_bench_human_judgments)
docs/exllama_v2.md ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ExllamaV2 GPTQ Inference Framework
2
+
3
+ We integrated the customized [ExllamaV2](https://github.com/turboderp/exllamav2) kernels into FastChat to provide **faster** GPTQ inference.
4
+
5
+ **Note: ExllamaV2 does not yet support the embedding REST API.**
6
+
7
+ ## Install ExllamaV2
8
+
9
+ Setup environment (please refer to [this link](https://github.com/turboderp/exllamav2#how-to) for more details):
10
+
11
+ ```bash
12
+ git clone https://github.com/turboderp/exllamav2
13
+ cd exllamav2
14
+ pip install -e .
15
+ ```
16
+
17
+ Chat with the CLI:
18
+ ```bash
19
+ python3 -m fastchat.serve.cli \
20
+ --model-path models/vicuna-7B-1.1-GPTQ-4bit-128g \
21
+ --enable-exllama
22
+ ```
23
+
24
+ Start model worker:
25
+ ```bash
26
+ # Download quantized model from huggingface
27
+ # Make sure you have git-lfs installed (https://git-lfs.com)
28
+ git lfs install
29
+ git clone https://huggingface.co/TheBloke/vicuna-7B-1.1-GPTQ-4bit-128g models/vicuna-7B-1.1-GPTQ-4bit-128g
30
+
31
+ # Load model with default configuration (max sequence length 4096, no GPU split setting).
32
+ python3 -m fastchat.serve.model_worker \
33
+ --model-path models/vicuna-7B-1.1-GPTQ-4bit-128g \
34
+ --enable-exllama
35
+
36
+ # Load model with max sequence length 2048, allocate 18 GB to CUDA:0 and 24 GB to CUDA:1.
37
+ python3 -m fastchat.serve.model_worker \
38
+ --model-path models/vicuna-7B-1.1-GPTQ-4bit-128g \
39
+ --enable-exllama \
40
+ --exllama-max-seq-len 2048 \
41
+ --exllama-gpu-split 18,24
42
+ ```
43
+
44
+ `--exllama-cache-8bit` can be used to enable 8-bit caching with exllama and save some VRAM.
45
+
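+ For example, a worker launch with the flag appended might look like:
+ ```bash
+ # Same worker command as above, with the 8-bit cache enabled to save VRAM.
+ python3 -m fastchat.serve.model_worker \
+     --model-path models/vicuna-7B-1.1-GPTQ-4bit-128g \
+     --enable-exllama \
+     --exllama-cache-8bit
+ ```
+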
46
+ ## Performance
47
+
48
+ Reference: https://github.com/turboderp/exllamav2#performance
49
+
50
+
51
+ | Model | Mode | Size | grpsz | act | V1: 3090Ti | V1: 4090 | V2: 3090Ti | V2: 4090 |
52
+ |------------|--------------|-------|-------|-----|------------|----------|------------|-------------|
53
+ | Llama | GPTQ | 7B | 128 | no | 143 t/s | 173 t/s | 175 t/s | **195** t/s |
54
+ | Llama | GPTQ | 13B | 128 | no | 84 t/s | 102 t/s | 105 t/s | **110** t/s |
55
+ | Llama | GPTQ | 33B | 128 | yes | 37 t/s | 45 t/s | 45 t/s | **48** t/s |
56
+ | OpenLlama | GPTQ | 3B | 128 | yes | 194 t/s | 226 t/s | 295 t/s | **321** t/s |
57
+ | CodeLlama | EXL2 4.0 bpw | 34B | - | - | - | - | 42 t/s | **48** t/s |
58
+ | Llama2 | EXL2 3.0 bpw | 7B | - | - | - | - | 195 t/s | **224** t/s |
59
+ | Llama2 | EXL2 4.0 bpw | 7B | - | - | - | - | 164 t/s | **197** t/s |
60
+ | Llama2 | EXL2 5.0 bpw | 7B | - | - | - | - | 144 t/s | **160** t/s |
61
+ | Llama2 | EXL2 2.5 bpw | 70B | - | - | - | - | 30 t/s | **35** t/s |
62
+ | TinyLlama | EXL2 3.0 bpw | 1.1B | - | - | - | - | 536 t/s | **635** t/s |
63
+ | TinyLlama | EXL2 4.0 bpw | 1.1B | - | - | - | - | 509 t/s | **590** t/s |
docs/gptq.md ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # GPTQ 4bit Inference
2
+
3
+ FastChat supports GPTQ 4-bit inference with [GPTQ-for-LLaMa](https://github.com/qwopqwop200/GPTQ-for-LLaMa).
4
+
5
+ 1. Windows users: use the `old-cuda` branch.
6
+ 2. Linux users: the `fastest-inference-4bit` branch is recommended.
7
+
8
+ ## Install
9
+
10
+ Setup environment:
11
+ ```bash
12
+ # cd /path/to/FastChat
13
+ git clone https://github.com/qwopqwop200/GPTQ-for-LLaMa.git repositories/GPTQ-for-LLaMa
14
+ cd repositories/GPTQ-for-LLaMa
15
+ # Windows users should use the `old-cuda` branch
16
+ git switch fastest-inference-4bit
17
+ # Install `quant-cuda` package in FastChat's virtualenv
18
+ python3 setup_cuda.py install
19
+ pip3 install texttable
20
+ ```
21
+
22
+ Chat with the CLI:
23
+ ```bash
24
+ python3 -m fastchat.serve.cli \
25
+ --model-path models/vicuna-7B-1.1-GPTQ-4bit-128g \
26
+ --gptq-wbits 4 \
27
+ --gptq-groupsize 128
28
+ ```
29
+
30
+ Start model worker:
31
+ ```bash
32
+ # Download quantized model from huggingface
33
+ # Make sure you have git-lfs installed (https://git-lfs.com)
34
+ git lfs install
35
+ git clone https://huggingface.co/TheBloke/vicuna-7B-1.1-GPTQ-4bit-128g models/vicuna-7B-1.1-GPTQ-4bit-128g
36
+
37
+ python3 -m fastchat.serve.model_worker \
38
+ --model-path models/vicuna-7B-1.1-GPTQ-4bit-128g \
39
+ --gptq-wbits 4 \
40
+ --gptq-groupsize 128
41
+
42
+ # You can specify which quantized model to use
43
+ python3 -m fastchat.serve.model_worker \
44
+ --model-path models/vicuna-7B-1.1-GPTQ-4bit-128g \
45
+ --gptq-ckpt models/vicuna-7B-1.1-GPTQ-4bit-128g/vicuna-7B-1.1-GPTQ-4bit-128g.safetensors \
46
+ --gptq-wbits 4 \
47
+ --gptq-groupsize 128 \
48
+ --gptq-act-order
49
+ ```
50
+
51
+ ## Benchmark
52
+
53
+ | LLaMA-13B | branch | Bits | group-size | memory(MiB) | PPL(c4) | Median(s/token) | act-order | speed up |
54
+ | --------- | ---------------------- | ---- | ---------- | ----------- | ------- | --------------- | --------- | -------- |
55
+ | FP16 | fastest-inference-4bit | 16 | - | 26634 | 6.96 | 0.0383 | - | 1x |
56
+ | GPTQ | triton | 4 | 128 | 8590 | 6.97 | 0.0551 | - | 0.69x |
57
+ | GPTQ | fastest-inference-4bit | 4 | 128 | 8699 | 6.97 | 0.0429 | true | 0.89x |
58
+ | GPTQ | fastest-inference-4bit | 4 | 128 | 8699 | 7.03 | 0.0287 | false | 1.33x |
59
+ | GPTQ | fastest-inference-4bit | 4 | -1 | 8448 | 7.12 | 0.0284 | false | 1.44x |
docs/langchain_integration.md ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Local LangChain with FastChat
2
+
3
+ [LangChain](https://python.langchain.com/en/latest/index.html) is a library that facilitates the development of applications by leveraging large language models (LLMs) and enabling their composition with other sources of computation or knowledge.
4
+ FastChat's OpenAI-compatible [API server](openai_api.md) enables using LangChain with open models seamlessly.
5
+
6
+ ## Launch RESTful API Server
7
+
8
+ Here are the steps to launch a local OpenAI API server for LangChain.
9
+
10
+ First, launch the controller
11
+
12
+ ```bash
13
+ python3 -m fastchat.serve.controller
14
+ ```
15
+
16
+ LangChain uses OpenAI model names by default, so we need to assign some faux OpenAI model names to our local model.
17
+ Here, we use Vicuna as an example and use it for three endpoints: chat completion, completion, and embedding.
18
+ `--model-path` can be a local folder or a Hugging Face repo name.
19
+ See a full list of supported models [here](../README.md#supported-models).
20
+
21
+ ```bash
22
+ python3 -m fastchat.serve.model_worker --model-names "gpt-3.5-turbo,text-davinci-003,text-embedding-ada-002" --model-path lmsys/vicuna-7b-v1.5
23
+ ```
24
+
25
+ Finally, launch the RESTful API server
26
+
27
+ ```bash
28
+ python3 -m fastchat.serve.openai_api_server --host localhost --port 8000
29
+ ```
30
+
31
+ ## Set OpenAI Environment
32
+
33
+ You can set your environment with the following commands.
34
+
35
+ Set OpenAI base url
36
+
37
+ ```bash
38
+ export OPENAI_API_BASE=http://localhost:8000/v1
39
+ ```
40
+
41
+ Set OpenAI API key
42
+
43
+ ```bash
44
+ export OPENAI_API_KEY=EMPTY
45
+ ```
46
+
47
+ If you encounter the following OOM error while creating embeddings, please set a smaller batch size via an environment variable.
48
+
49
+ ~~~bash
50
+ openai.error.APIError: Invalid response object from API: '{"object":"error","message":"**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**\\n\\n(CUDA out of memory. Tried to allocate xxx MiB (GPU 0; xxx GiB total capacity; xxx GiB already allocated; xxx MiB free; xxx GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF)","code":50002}' (HTTP response code was 400)
51
+ ~~~
52
+
53
+ You can try `export FASTCHAT_WORKER_API_EMBEDDING_BATCH_SIZE=1`.
54
+
55
+ ## Try local LangChain
56
+
57
+ Here is a question answering example.
58
+
59
+ Download a text file.
60
+
61
+ ```bash
62
+ wget https://raw.githubusercontent.com/hwchase17/langchain/v0.0.200/docs/modules/state_of_the_union.txt
63
+ ```
64
+
65
+ Run LangChain.
66
+
67
+ ~~~py
68
+ from langchain.chat_models import ChatOpenAI
69
+ from langchain.document_loaders import TextLoader
70
+ from langchain.embeddings import OpenAIEmbeddings
71
+ from langchain.indexes import VectorstoreIndexCreator
72
+
73
+ embedding = OpenAIEmbeddings(model="text-embedding-ada-002")
74
+ loader = TextLoader("state_of_the_union.txt")
75
+ index = VectorstoreIndexCreator(embedding=embedding).from_loaders([loader])
76
+ llm = ChatOpenAI(model="gpt-3.5-turbo")
77
+
78
+ questions = [
79
+ "Who is the speaker",
80
+ "What did the president say about Ketanji Brown Jackson",
81
+ "What are the threats to America",
82
+ "Who are mentioned in the speech",
83
+ "Who is the vice president",
84
+ "How many projects were announced",
85
+ ]
86
+
87
+ for query in questions:
88
+ print("Query:", query)
89
+ print("Answer:", index.query(query, llm=llm))
90
+ ~~~
docs/model_support.md ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Model Support
2
+
3
+ ## Supported models
4
+
5
+ - [meta-llama/Llama-2-7b-chat-hf](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf)
6
+ - example: `python3 -m fastchat.serve.cli --model-path meta-llama/Llama-2-7b-chat-hf`
7
+ - Vicuna, Alpaca, LLaMA, Koala
8
+ - example: `python3 -m fastchat.serve.cli --model-path lmsys/vicuna-7b-v1.5`
9
+ - [BAAI/AquilaChat-7B](https://huggingface.co/BAAI/AquilaChat-7B)
10
+ - [BAAI/AquilaChat2-7B](https://huggingface.co/BAAI/AquilaChat2-7B)
11
+ - [BAAI/AquilaChat2-34B](https://huggingface.co/BAAI/AquilaChat2-34B)
12
+ - [BAAI/bge-large-en](https://huggingface.co/BAAI/bge-large-en#using-huggingface-transformers)
13
+ - [baichuan-inc/baichuan-7B](https://huggingface.co/baichuan-inc/baichuan-7B)
14
+ - [BlinkDL/RWKV-4-Raven](https://huggingface.co/BlinkDL/rwkv-4-raven)
15
+ - example: `python3 -m fastchat.serve.cli --model-path ~/model_weights/RWKV-4-Raven-7B-v11x-Eng99%-Other1%-20230429-ctx8192.pth`
16
+ - [bofenghuang/vigogne-2-7b-instruct](https://huggingface.co/bofenghuang/vigogne-2-7b-instruct)
17
+ - [bofenghuang/vigogne-2-7b-chat](https://huggingface.co/bofenghuang/vigogne-2-7b-chat)
18
+ - [camel-ai/CAMEL-13B-Combined-Data](https://huggingface.co/camel-ai/CAMEL-13B-Combined-Data)
19
+ - [codellama/CodeLlama-7b-Instruct-hf](https://huggingface.co/codellama/CodeLlama-7b-Instruct-hf)
20
+ - [databricks/dolly-v2-12b](https://huggingface.co/databricks/dolly-v2-12b)
21
+ - [FlagAlpha/Llama2-Chinese-13b-Chat](https://huggingface.co/FlagAlpha/Llama2-Chinese-13b-Chat)
22
+ - [FreedomIntelligence/phoenix-inst-chat-7b](https://huggingface.co/FreedomIntelligence/phoenix-inst-chat-7b)
23
+ - [FreedomIntelligence/ReaLM-7b-v1](https://huggingface.co/FreedomIntelligence/Realm-7b)
24
+ - [h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-7b](https://huggingface.co/h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-7b)
25
+ - [internlm/internlm-chat-7b](https://huggingface.co/internlm/internlm-chat-7b)
26
+ - [lcw99/polyglot-ko-12.8b-chang-instruct-chat](https://huggingface.co/lcw99/polyglot-ko-12.8b-chang-instruct-chat)
27
+ - [lmsys/fastchat-t5-3b-v1.0](https://huggingface.co/lmsys/fastchat-t5)
28
+ - [mosaicml/mpt-7b-chat](https://huggingface.co/mosaicml/mpt-7b-chat)
29
+ - example: `python3 -m fastchat.serve.cli --model-path mosaicml/mpt-7b-chat`
30
+ - [Neutralzz/BiLLa-7B-SFT](https://huggingface.co/Neutralzz/BiLLa-7B-SFT)
31
+ - [nomic-ai/gpt4all-13b-snoozy](https://huggingface.co/nomic-ai/gpt4all-13b-snoozy)
32
+ - [NousResearch/Nous-Hermes-13b](https://huggingface.co/NousResearch/Nous-Hermes-13b)
33
+ - [openaccess-ai-collective/manticore-13b-chat-pyg](https://huggingface.co/openaccess-ai-collective/manticore-13b-chat-pyg)
34
+ - [OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5](https://huggingface.co/OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5)
35
+ - [openchat/openchat_3.5](https://huggingface.co/openchat/openchat_3.5)
36
+ - [Open-Orca/Mistral-7B-OpenOrca](https://huggingface.co/Open-Orca/Mistral-7B-OpenOrca)
37
+ - [VMware/open-llama-7b-v2-open-instruct](https://huggingface.co/VMware/open-llama-7b-v2-open-instruct)
38
+ - [Phind/Phind-CodeLlama-34B-v2](https://huggingface.co/Phind/Phind-CodeLlama-34B-v2)
39
+ - [project-baize/baize-v2-7b](https://huggingface.co/project-baize/baize-v2-7b)
40
+ - [Qwen/Qwen-7B-Chat](https://huggingface.co/Qwen/Qwen-7B-Chat)
41
+ - [Salesforce/codet5p-6b](https://huggingface.co/Salesforce/codet5p-6b)
42
+ - [StabilityAI/stablelm-tuned-alpha-7b](https://huggingface.co/stabilityai/stablelm-tuned-alpha-7b)
43
+ - [THUDM/chatglm-6b](https://huggingface.co/THUDM/chatglm-6b)
44
+ - [THUDM/chatglm2-6b](https://huggingface.co/THUDM/chatglm2-6b)
45
+ - [tiiuae/falcon-40b](https://huggingface.co/tiiuae/falcon-40b)
46
+ - [tiiuae/falcon-180B-chat](https://huggingface.co/tiiuae/falcon-180B-chat)
47
+ - [timdettmers/guanaco-33b-merged](https://huggingface.co/timdettmers/guanaco-33b-merged)
48
+ - [togethercomputer/RedPajama-INCITE-7B-Chat](https://huggingface.co/togethercomputer/RedPajama-INCITE-7B-Chat)
49
+ - [WizardLM/WizardLM-13B-V1.0](https://huggingface.co/WizardLM/WizardLM-13B-V1.0)
50
+ - [WizardLM/WizardCoder-15B-V1.0](https://huggingface.co/WizardLM/WizardCoder-15B-V1.0)
51
+ - [HuggingFaceH4/starchat-beta](https://huggingface.co/HuggingFaceH4/starchat-beta)
52
+ - [HuggingFaceH4/zephyr-7b-alpha](https://huggingface.co/HuggingFaceH4/zephyr-7b-alpha)
53
+ - [Xwin-LM/Xwin-LM-7B-V0.1](https://huggingface.co/Xwin-LM/Xwin-LM-70B-V0.1)
54
+ - [OpenLemur/lemur-70b-chat-v1](https://huggingface.co/OpenLemur/lemur-70b-chat-v1)
55
+ - [allenai/tulu-2-dpo-7b](https://huggingface.co/allenai/tulu-2-dpo-7b)
56
+ - [Microsoft/Orca-2-7b](https://huggingface.co/microsoft/Orca-2-7b)
57
+ - [deepseek-ai/deepseek-llm-67b-chat](https://huggingface.co/deepseek-ai/deepseek-llm-67b-chat)
58
+ - [deepseek-ai/deepseek-coder-33b-instruct](https://huggingface.co/deepseek-ai/deepseek-coder-33b-instruct)
59
+ - [meta-math/MetaMath-7B-V1.0](https://huggingface.co/meta-math/MetaMath-7B-V1.0)
60
+ - Any [EleutherAI](https://huggingface.co/EleutherAI) pythia model such as [pythia-6.9b](https://huggingface.co/EleutherAI/pythia-6.9b)
61
+ - Any [Peft](https://github.com/huggingface/peft) adapter trained on top of a
62
+ model above. To activate, must have `peft` in the model path. Note: If
63
+ loading multiple peft models, you can have them share the base model weights by
64
+ setting the environment variable `PEFT_SHARE_BASE_WEIGHTS=true` in any model
65
+ worker (see the sketch after this list).
66
+
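+ As a hypothetical sketch of the Peft note above (the adapter path below is a placeholder; it only needs to contain `peft` so the Peft adapter is selected):
+ ```bash
+ # Share the base model weights across multiple peft workers (hypothetical adapter path).
+ PEFT_SHARE_BASE_WEIGHTS=true python3 -m fastchat.serve.model_worker \
+     --model-path /path/to/my-peft-llama-adapter
+ ```
+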
67
+ ## How to support a new model
68
+
69
+ To support a new model in FastChat, you need to correctly handle its prompt template and model loading.
70
+ The goal is to make the following command run with the correct prompts.
71
+
72
+ ```
73
+ python3 -m fastchat.serve.cli --model [YOUR_MODEL_PATH]
74
+ ```
75
+
76
+ You can run this example command to learn the code logic.
77
+
78
+ ```
79
+ python3 -m fastchat.serve.cli --model lmsys/vicuna-7b-v1.5
80
+ ```
81
+
82
+ You can add `--debug` to see the actual prompt sent to the model.
83
+
84
+ ### Steps
85
+
86
+ FastChat uses the `Conversation` class to handle prompt templates and `BaseModelAdapter` class to handle model loading.
87
+
88
+ 1. Implement a conversation template for the new model at [fastchat/conversation.py](https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py). You can follow existing examples and use `register_conv_template` to add a new one. Please also add a link to the official reference code if possible.
89
+ 2. Implement a model adapter for the new model at [fastchat/model/model_adapter.py](https://github.com/lm-sys/FastChat/blob/main/fastchat/model/model_adapter.py). You can follow existing examples and use `register_model_adapter` to add a new one.
90
+ 3. (Optional) add the model name to the "Supported models" [section](#supported-models) above and add more information in [fastchat/model/model_registry.py](https://github.com/lm-sys/FastChat/blob/main/fastchat/model/model_registry.py).
91
+
92
+ After these steps, the new model should be compatible with most FastChat features, such as CLI, web UI, model worker, and OpenAI-compatible API server. Please do some testing with these features as well.
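+
+ As a minimal sketch of step 1 (the template name, roles, and separators below are illustrative placeholders; real values must match how the model was trained):
+ ```python
+ from fastchat.conversation import Conversation, SeparatorStyle, register_conv_template
+
+ # Hypothetical template for a model named "my-model".
+ register_conv_template(
+     Conversation(
+         name="my-model",
+         system_message="A chat between a curious user and an artificial intelligence assistant.",
+         roles=("USER", "ASSISTANT"),
+         sep_style=SeparatorStyle.ADD_COLON_SINGLE,
+         sep="\n",
+     )
+ )
+ ```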
docs/openai_api.md ADDED
@@ -0,0 +1,151 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # OpenAI-Compatible RESTful APIs
2
+
3
+ FastChat provides OpenAI-compatible APIs for its supported models, so you can use FastChat as a local drop-in replacement for OpenAI APIs.
4
+ The FastChat server is compatible with both [openai-python](https://github.com/openai/openai-python) library and cURL commands.
5
+
6
+ The following OpenAI APIs are supported:
7
+ - Chat Completions. (Reference: https://platform.openai.com/docs/api-reference/chat)
8
+ - Completions. (Reference: https://platform.openai.com/docs/api-reference/completions)
9
+ - Embeddings. (Reference: https://platform.openai.com/docs/api-reference/embeddings)
10
+
11
+ ## RESTful API Server
12
+ First, launch the controller
13
+
14
+ ```bash
15
+ python3 -m fastchat.serve.controller
16
+ ```
17
+
18
+ Then, launch the model worker(s)
19
+
20
+ ```bash
21
+ python3 -m fastchat.serve.model_worker --model-path lmsys/vicuna-7b-v1.5
22
+ ```
23
+
24
+ Finally, launch the RESTful API server
25
+
26
+ ```bash
27
+ python3 -m fastchat.serve.openai_api_server --host localhost --port 8000
28
+ ```
29
+
30
+ Now, let us test the API server.
31
+
32
+ ### OpenAI Official SDK
33
+ The goal of `openai_api_server.py` is to implement a fully OpenAI-compatible API server, so the models can be used directly with [openai-python](https://github.com/openai/openai-python) library.
34
+
35
+ First, install openai-python:
36
+ ```bash
37
+ pip install --upgrade openai
38
+ ```
39
+
40
+ Then, interact with model vicuna:
41
+ ```python
42
+ import openai
43
+ # to get proper authentication, make sure to use a valid key that's listed in
44
+ # the --api-keys flag. if no flag value is provided, the `api_key` will be ignored.
45
+ openai.api_key = "EMPTY"
46
+ openai.api_base = "http://localhost:8000/v1"
47
+
48
+ model = "vicuna-7b-v1.5"
49
+ prompt = "Once upon a time"
50
+
51
+ # create a completion
52
+ completion = openai.Completion.create(model=model, prompt=prompt, max_tokens=64)
53
+ # print the completion
54
+ print(prompt + completion.choices[0].text)
55
+
56
+ # create a chat completion
57
+ completion = openai.ChatCompletion.create(
58
+ model=model,
59
+ messages=[{"role": "user", "content": "Hello! What is your name?"}]
60
+ )
61
+ # print the completion
62
+ print(completion.choices[0].message.content)
63
+ ```
64
+
65
+ Streaming is also supported; see [test_openai_api.py](../tests/test_openai_api.py) and the sketch below. If your API server is behind a proxy, you will need to turn off buffering; in Nginx, you can do so by setting `proxy_buffering off;` in the location block for the proxy.
66
+
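+ As a minimal streaming sketch using the pre-1.0 `openai-python` interface shown above (the model name and prompt are illustrative):
+ ```python
+ import openai
+
+ openai.api_key = "EMPTY"
+ openai.api_base = "http://localhost:8000/v1"
+
+ # Stream a chat completion and print tokens as they arrive.
+ for chunk in openai.ChatCompletion.create(
+     model="vicuna-7b-v1.5",
+     messages=[{"role": "user", "content": "Tell me a short story."}],
+     stream=True,
+ ):
+     print(chunk.choices[0].delta.get("content", ""), end="", flush=True)
+ print()
+ ```
+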
67
+ ### cURL
68
+ cURL is another good tool for observing the output of the API.
69
+
70
+ List Models:
71
+ ```bash
72
+ curl http://localhost:8000/v1/models
73
+ ```
74
+
75
+ Chat Completions:
76
+ ```bash
77
+ curl http://localhost:8000/v1/chat/completions \
78
+ -H "Content-Type: application/json" \
79
+ -d '{
80
+ "model": "vicuna-7b-v1.5",
81
+ "messages": [{"role": "user", "content": "Hello! What is your name?"}]
82
+ }'
83
+ ```
84
+
85
+ Text Completions:
86
+ ```bash
87
+ curl http://localhost:8000/v1/completions \
88
+ -H "Content-Type: application/json" \
89
+ -d '{
90
+ "model": "vicuna-7b-v1.5",
91
+ "prompt": "Once upon a time",
92
+ "max_tokens": 41,
93
+ "temperature": 0.5
94
+ }'
95
+ ```
96
+
97
+ Embeddings:
98
+ ```bash
99
+ curl http://localhost:8000/v1/embeddings \
100
+ -H "Content-Type: application/json" \
101
+ -d '{
102
+ "model": "vicuna-7b-v1.5",
103
+ "input": "Hello world!"
104
+ }'
105
+ ```
106
+
107
+ ### Running multiple models
108
+
109
+ If you want to run multiple models on the same machine and in the same process,
110
+ you can replace the `model_worker` step above with a multi model variant:
111
+
112
+ ```bash
113
+ python3 -m fastchat.serve.multi_model_worker \
114
+ --model-path lmsys/vicuna-7b-v1.5 \
115
+ --model-names vicuna-7b-v1.5 \
116
+ --model-path lmsys/longchat-7b-16k \
117
+ --model-names longchat-7b-16k
118
+ ```
119
+
120
+ This loads both models into the same accelerator and in the same process. This
121
+ works best when using a Peft model that triggers the `PeftModelAdapter`.
122
+
123
+ TODO: Base model weight optimization will be fixed once [this
124
+ Peft](https://github.com/huggingface/peft/issues/430) issue is resolved.
125
+
126
+ ## LangChain Support
127
+ This OpenAI-compatible API server supports LangChain. See [LangChain Integration](langchain_integration.md) for details.
128
+
129
+ ## Adjusting Environment Variables
130
+
131
+ ### Timeout
132
+ By default, a timeout error will occur if a model worker does not respond within 100 seconds. If your model/hardware is slower, you can change this timeout through an environment variable:
133
+
134
+ ```bash
135
+ export FASTCHAT_WORKER_API_TIMEOUT=<larger timeout in seconds>
136
+ ```
137
+
138
+ ### Batch size
139
+ If you encounter an OOM error while creating embeddings, you can use a smaller batch size by setting
140
+
141
+ ```bash
142
+ export FASTCHAT_WORKER_API_EMBEDDING_BATCH_SIZE=1
143
+ ```
144
+
145
+ ## Todos
146
+ Some features to be implemented:
147
+
148
+ - [ ] Support more parameters like `logprobs`, `logit_bias`, `user`, `presence_penalty` and `frequency_penalty`
149
+ - [ ] Model details (permissions, owner and create time)
150
+ - [ ] Edits API
151
+ - [ ] Rate Limitation Settings
docs/server_arch.md ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # FastChat Server Architecture
2
+ ![server arch](../assets/server_arch.png)
docs/training.md ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ### Fine-tuning FastChat-T5
2
+ You can use the following command to train FastChat-T5 with 4 x A100 (40GB).
3
+ ```bash
4
+ torchrun --nproc_per_node=4 --master_port=9778 fastchat/train/train_flant5.py \
5
+ --model_name_or_path google/flan-t5-xl \
6
+ --data_path ./data/dummy_conversation.json \
7
+ --bf16 True \
8
+ --output_dir ./checkpoints_flant5_3b \
9
+ --num_train_epochs 3 \
10
+ --per_device_train_batch_size 1 \
11
+ --per_device_eval_batch_size 1 \
12
+ --gradient_accumulation_steps 4 \
13
+ --evaluation_strategy "no" \
14
+ --save_strategy "steps" \
15
+ --save_steps 300 \
16
+ --save_total_limit 1 \
17
+ --learning_rate 2e-5 \
18
+ --weight_decay 0. \
19
+ --warmup_ratio 0.03 \
20
+ --lr_scheduler_type "cosine" \
21
+ --logging_steps 1 \
22
+ --fsdp "full_shard auto_wrap" \
23
+ --fsdp_transformer_layer_cls_to_wrap T5Block \
24
+ --tf32 True \
25
+ --model_max_length 2048 \
26
+ --preprocessed_path ./preprocessed_data/processed.json \
27
+ --gradient_checkpointing True
28
+ ```
29
+
30
+ After training, please use our post-processing [function](https://github.com/lm-sys/FastChat/blob/55051ad0f23fef5eeecbda14a2e3e128ffcb2a98/fastchat/utils.py#L166-L185) to update the saved model weight. Additional discussions can be found [here](https://github.com/lm-sys/FastChat/issues/643).
31
+
32
+ ### Fine-tuning using (Q)LoRA
33
+ You can use the following command to train Vicuna-7B with QLoRA using ZeRO2. Note that ZeRO3 is not currently supported with QLoRA, but ZeRO3 does support LoRA; a reference configuration is available under playground/deepspeed_config_s3.json. To use QLoRA, you must have bitsandbytes>=0.39.0 and transformers>=4.30.0 installed.
34
+ ```bash
35
+ deepspeed fastchat/train/train_lora.py \
36
+ --model_name_or_path ~/model_weights/llama-7b \
37
+ --lora_r 8 \
38
+ --lora_alpha 16 \
39
+ --lora_dropout 0.05 \
40
+ --data_path ./data/dummy_conversation.json \
41
+ --bf16 True \
42
+ --output_dir ./checkpoints \
43
+ --num_train_epochs 3 \
44
+ --per_device_train_batch_size 1 \
45
+ --per_device_eval_batch_size 1 \
46
+ --gradient_accumulation_steps 1 \
47
+ --evaluation_strategy "no" \
48
+ --save_strategy "steps" \
49
+ --save_steps 1200 \
50
+ --save_total_limit 100 \
51
+ --learning_rate 2e-5 \
52
+ --weight_decay 0. \
53
+ --warmup_ratio 0.03 \
54
+ --lr_scheduler_type "cosine" \
55
+ --logging_steps 1 \
56
+ --tf32 True \
57
+ --model_max_length 2048 \
58
+ --q_lora True \
59
+ --deepspeed playground/deepspeed_config_s2.json
60
+ ```
61
+
62
+ For T5-XL or XXL
63
+
64
+ ```bash
65
+ deepspeed fastchat/train/train_lora_t5.py \
66
+ --model_name_or_path google/flan-t5-xl \
67
+ --data_path ./data/dummy_conversation.json \
68
+ --bf16 True \
69
+ --output_dir ./checkpoints_flant5_3b \
70
+ --num_train_epochs 3 \
71
+ --per_device_train_batch_size 1 \
72
+ --per_device_eval_batch_size 1 \
73
+ --gradient_accumulation_steps 4 \
74
+ --evaluation_strategy "no" \
75
+ --save_strategy "steps" \
76
+ --save_steps 300 \
77
+ --save_total_limit 1 \
78
+ --learning_rate 2e-5 \
79
+ --weight_decay 0. \
80
+ --warmup_ratio 0.03 \
81
+ --lr_scheduler_type "cosine" \
82
+ --logging_steps 1 \
83
+ --model_max_length 2048 \
84
+ --preprocessed_path ./preprocessed_data/processed.json \
85
+ --gradient_checkpointing True \
86
+ --q_lora True \
87
+ --deepspeed playground/deepspeed_config_s2.json
88
+
89
+ ```
90
+
91
+ ### Fine-tuning Vicuna-7B with Local NPUs
92
+
93
+ You can use the following command to train Vicuna-7B with 8 x 910B (60GB). Use `--nproc_per_node` to specify the number of NPUs.
94
+ ```bash
95
+ torchrun --nproc_per_node=8 --master_port=20001 fastchat/train/train.py \
96
+ --model_name_or_path ~/vicuna-7b-v1.5-16k \
97
+ --data_path data/dummy_conversation.json \
98
+ --fp16 True \
99
+ --output_dir output_vicuna \
100
+ --num_train_epochs 3 \
101
+ --per_device_train_batch_size 8 \
102
+ --per_device_eval_batch_size 1 \
103
+ --gradient_accumulation_steps 1 \
104
+ --evaluation_strategy "no" \
105
+ --save_strategy "steps" \
106
+ --save_steps 1200 \
107
+ --save_total_limit 10 \
108
+ --learning_rate 2e-5 \
109
+ --weight_decay 0. \
110
+ --warmup_ratio 0.03 \
111
+ --lr_scheduler_type "cosine" \
112
+ --logging_steps 1 \
113
+ --fsdp "full_shard auto_wrap" \
114
+ --fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' \
115
+ --model_max_length 2048 \
116
+ --gradient_checkpointing True \
117
+ --lazy_preprocess True
118
+ ```
docs/vicuna_weights_version.md ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ## Vicuna Weights
2
+
3
+ | Weights version | Link | FastChat version compatibility | Base Model | Release Date | Fine-tuning Data |
4
+ | ---- | ---- | ---- | ---- | ---- | ---- |
5
+ | v1.5 | [7B](https://huggingface.co/lmsys/vicuna-7b-v1.5), [7B-16k](https://huggingface.co/lmsys/vicuna-7b-v1.5-16k), [13B](https://huggingface.co/lmsys/vicuna-13b-v1.5), [13B-16k](https://huggingface.co/lmsys/vicuna-13b-v1.5-16k) | `>=0.2.21` | Llama 2 | Aug. 1, 2023 | 370M tokens |
6
+ | v1.3 | [7B](https://huggingface.co/lmsys/vicuna-7b-v1.3), [13B](https://huggingface.co/lmsys/vicuna-13b-v1.3), [33B](https://huggingface.co/lmsys/vicuna-33b-v1.3) | `>=0.2.1` | Llama 1 | Jun. 22, 2023 | 370M tokens |
7
+ | v1.1 | [7B](https://huggingface.co/lmsys/vicuna-7b-v1.1), [13B](https://huggingface.co/lmsys/vicuna-13b-v1.1) | `>=0.2.1` | Llama 1 | Apr. 12, 2023 | - |
8
+ | v0 | [7B-delta](https://huggingface.co/lmsys/vicuna-7b-delta-v0), [13B-delta](https://huggingface.co/lmsys/vicuna-13b-delta-v0) | `<=0.1.10` | Llama 1 | Mar. 30, 2023 | - |
9
+
10
+ ### Updates
11
+ - Major updates of weights v1.5
12
+ - Use Llama2 as the base model.
13
+ - Provide 16K context length versions using linear RoPE scaling.
14
+
15
+ - Major updates of weights v1.3
16
+ - Train with twice the amount of ShareGPT data compared to previous versions.
17
+ - Provide merged weights directly instead of delta weights.
18
+
19
+ - Major updates of weights v1.1
20
+ - Refactor the tokenization and separator. In Vicuna v1.1, the separator has been changed from `###` to the EOS token `</s>`. This change makes it easier to determine the generation stop criteria and enables better compatibility with other libraries.
21
+ - Fix the supervised fine-tuning loss computation for better model quality.
22
+
23
+ ## Prompt Template
24
+
25
+ ### Example prompt (weights v1.1, v1.3, v1.5)
26
+ ```
27
+ A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.
28
+
29
+ USER: Hello!
30
+ ASSISTANT: Hello!</s>
31
+ USER: How are you?
32
+ ASSISTANT: I am good.</s>
33
+ ```
34
+
35
+ See a full prompt template [here](https://github.com/lm-sys/FastChat/blob/d578599c69d060e6d40943f1b5b72af98956092a/fastchat/conversation.py#L286-L299) and example output [here](https://github.com/lm-sys/FastChat/blob/d578599c69d060e6d40943f1b5b72af98956092a/fastchat/conversation.py#L748-L753).
36
+
37
+ ### Example prompt (weights v0)
38
+ ```
39
+ A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.
40
+
41
+ ### Human: Hello!
42
+ ### Assistant: Hello!
43
+ ### Human: How are you?
44
+ ### Assistant: I am good.
45
+ ```
46
+
47
+ See the full prompt template [here](https://github.com/lm-sys/FastChat/blob/d578599c69d060e6d40943f1b5b72af98956092a/fastchat/conversation.py#L238-L269).
48
+
49
+ ## How to Apply Delta Weights (Only Needed for Weights v0)
50
+
51
+ We release [Vicuna](https://lmsys.org/blog/2023-03-30-vicuna/) weights v0 as delta weights to comply with the LLaMA model license.
52
+ You can add our delta to the original LLaMA weights to obtain the Vicuna weights. Instructions:
53
+
54
+ 1. Get the original LLaMA weights in the Hugging Face format by following the instructions [here](https://huggingface.co/docs/transformers/main/model_doc/llama).
55
+ 2. Use the following scripts to get Vicuna weights by applying our delta. They will automatically download delta weights from our Hugging Face [account](https://huggingface.co/lmsys).
56
+
57
+ **NOTE**:
58
+ Weights v1.1 are only compatible with `transformers>=4.28.0` and `fschat>=0.2.0`.
59
+ Please update your local packages accordingly. If you follow the above commands to do a fresh install, then you should get all the correct versions.
60
+
61
+ #### Vicuna-7B
62
+ This conversion command needs around 30 GB of CPU RAM.
63
+ See the "Low CPU Memory Conversion" section below if you do not have enough memory.
64
+ Replace `/path/to/*` with the real paths.
65
+ ```bash
66
+ python3 -m fastchat.model.apply_delta \
67
+ --base-model-path /path/to/llama-7b \
68
+ --target-model-path /path/to/output/vicuna-7b \
69
+ --delta-path lmsys/vicuna-7b-delta-v1.1
70
+ ```
71
+
72
+ #### Vicuna-13B
73
+ This conversion command needs around 60 GB of CPU RAM.
74
+ See the "Low CPU Memory Conversion" section below if you do not have enough memory.
75
+ Replace `/path/to/*` with the real paths.
76
+ ```bash
77
+ python3 -m fastchat.model.apply_delta \
78
+ --base-model-path /path/to/llama-13b \
79
+ --target-model-path /path/to/output/vicuna-13b \
80
+ --delta-path lmsys/vicuna-13b-delta-v1.1
81
+ ```
82
+
83
+ #### Low CPU Memory Conversion
84
+ You can try these methods to reduce the CPU RAM requirement of weight conversion.
85
+ 1. Append `--low-cpu-mem` to the commands above, which will split large weight files into smaller ones and use the disk as temporary storage. This can keep the peak memory at less than 16GB. See the example after this list.
86
+ 2. Create a large swap file and rely on the operating system to automatically utilize the disk as virtual memory.
87
+
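+ For example, the 7B conversion command from above with the flag appended would look like:
+ ```bash
+ python3 -m fastchat.model.apply_delta \
+     --base-model-path /path/to/llama-7b \
+     --target-model-path /path/to/output/vicuna-7b \
+     --delta-path lmsys/vicuna-7b-delta-v1.1 \
+     --low-cpu-mem
+ ```
+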
88
+ ## FAQ
89
+
90
+ ### Tokenizer issues
91
+ There are some frequently asked tokenizer issues (https://github.com/lm-sys/FastChat/issues/408).
92
+ Some of them are not only related to FastChat or Vicuna weights but are also related to how you convert the base llama model.
93
+
94
+ We suggest that you use `transformers>=4.28.0` and redo the weight conversion for the base llama model.
95
+ After applying the delta, you should have a file named `special_tokens_map.json` in your converted weight folder for either v0 or v1.1.
96
+ The contents of this file should be the same as this file: https://huggingface.co/lmsys/vicuna-13b-delta-v0/blob/main/special_tokens_map.json.
97
+ If the file is not present, please copy the `special_tokens_map.json` and `tokenizer_config.json` files from https://huggingface.co/lmsys/vicuna-13b-delta-v0/tree/main to your converted weight folder. This works for both v0 and v1.1.
docs/vllm_integration.md ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # vLLM Integration
2
+ You can use [vLLM](https://vllm.ai/) as an optimized worker implementation in FastChat.
3
+ It offers advanced continuous batching and a much higher (~10x) throughput.
4
+ See the supported models [here](https://vllm.readthedocs.io/en/latest/models/supported_models.html).
5
+
6
+ ## Instructions
7
+ 1. Install vLLM.
8
+ ```
9
+ pip install vllm
10
+ ```
11
+
12
+ 2. When you launch a model worker, replace the normal worker (`fastchat.serve.model_worker`) with the vLLM worker (`fastchat.serve.vllm_worker`). All other commands such as controller, gradio web server, and OpenAI API server are kept the same.
13
+ ```
14
+ python3 -m fastchat.serve.vllm_worker --model-path lmsys/vicuna-7b-v1.5
15
+ ```
16
+
17
+ If you see tokenizer errors, try
18
+ ```
19
+ python3 -m fastchat.serve.vllm_worker --model-path lmsys/vicuna-7b-v1.5 --tokenizer hf-internal-testing/llama-tokenizer
20
+ ```
21
+
22
+ If you use an AWQ quantized model, try
23
+ ```
24
+ python3 -m fastchat.serve.vllm_worker --model-path TheBloke/vicuna-7B-v1.5-AWQ --quantization awq
25
+ ```
docs/xFasterTransformer.md ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # xFasterTransformer Inference Framework
2
+
3
+ We integrated the customized [xFasterTransformer](https://github.com/intel/xFasterTransformer) framework into FastChat to provide **faster** inference speed on Intel CPUs.
4
+
5
+ ## Install xFasterTransformer
6
+
7
+ Setup environment (please refer to [this link](https://github.com/intel/xFasterTransformer#installation) for more details):
8
+
9
+ ```bash
10
+ pip install xfastertransformer
11
+ ```
12
+
13
+ ## Prepare models
14
+
15
+ Prepare Model (please refer to [this link](https://github.com/intel/xFasterTransformer#prepare-model) for more details):
16
+ ```bash
17
+ python ./tools/chatglm_convert.py -i ${HF_DATASET_DIR} -o ${OUTPUT_DIR}
18
+ ```
19
+
20
+ ## Parameters of xFasterTransformer
21
+ - `--enable-xft`: enable xFasterTransformer in FastChat.
22
+ - `--xft-max-seq-len`: set the maximum token length the model can process; this length includes the input tokens.
23
+ - `--xft-dtype`: set the data type used by xFasterTransformer for computation. It supports fp32, fp16, int8, bf16, and hybrid types such as bf16_fp16 and bf16_int8. For details, please refer to [this link](https://github.com/intel/xFasterTransformer/wiki/Data-Type-Support-Platform).
24
+
25
+
26
+ Chat with the CLI:
27
+ ```bash
28
+ #run inference on all CPUs and using float16
29
+ python3 -m fastchat.serve.cli \
30
+ --model-path /path/to/models \
31
+ --enable-xft \
32
+ --xft-dtype fp16
33
+ ```
34
+ or with numactl on multi-socket server for better performance
35
+ ```bash
36
+ #run inference on numanode 0 and with data type bf16_fp16 (first token uses bfloat16, and rest tokens use float16)
37
+ numactl -N 0 --localalloc \
38
+ python3 -m fastchat.serve.cli \
39
+ --model-path /path/to/models/chatglm2_6b_cpu/ \
40
+ --enable-xft \
41
+ --xft-dtype bf16_fp16
42
+ ```
43
+ or using MPI to run inference on 2 sockets for better performance
44
+ ```bash
45
+ #run inference on numanode 0 and 1 and with data type bf16_fp16 (first token uses bfloat16, and rest tokens use float16)
46
+ OMP_NUM_THREADS=$CORE_NUM_PER_SOCKET LD_PRELOAD=libiomp5.so mpirun \
47
+ -n 1 numactl -N 0 --localalloc \
48
+ python -m fastchat.serve.cli \
49
+ --model-path /path/to/models/chatglm2_6b_cpu/ \
50
+ --enable-xft \
51
+ --xft-dtype bf16_fp16 : \
52
+ -n 1 numactl -N 1 --localalloc \
53
+ python -m fastchat.serve.cli \
54
+ --model-path /path/to/models/chatglm2_6b_cpu/ \
55
+ --enable-xft \
56
+ --xft-dtype bf16_fp16
57
+ ```
58
+
59
+
60
+ Start model worker:
61
+ ```bash
62
+ # Load model with the default configuration (max sequence length 4096).
63
+ python3 -m fastchat.serve.model_worker \
64
+ --model-path /path/to/models \
65
+ --enable-xft \
66
+ --xft-dtype bf16_fp16
67
+ ```
68
+ or with numactl on multi-socket server for better performance
69
+ ```bash
70
+ #run inference on numanode 0 and with data type bf16_fp16 (first token uses bfloat16, and rest tokens use float16)
71
+ numactl -N 0 --localalloc python3 -m fastchat.serve.model_worker \
72
+ --model-path /path/to/models \
73
+ --enable-xft \
74
+ --xft-dtype bf16_fp16
75
+ ```
76
+ or using MPI to run inference on 2 sockets for better performance
77
+ ```bash
78
+ #run inference on numanode 0 and 1 and with data type bf16_fp16 (first token uses bfloat16, and rest tokens use float16)
79
+ OMP_NUM_THREADS=$CORE_NUM_PER_SOCKET LD_PRELOAD=libiomp5.so mpirun \
80
+ -n 1 numactl -N 0 --localalloc python -m fastchat.serve.model_worker \
81
+ --model-path /path/to/models \
82
+ --enable-xft \
83
+ --xft-dtype bf16_fp16 : \
84
+ -n 1 numactl -N 1 --localalloc python -m fastchat.serve.model_worker \
85
+ --model-path /path/to/models \
86
+ --enable-xft \
87
+ --xft-dtype bf16_fp16
88
+ ```
89
+
90
+ For more details, please refer to [this link](https://github.com/intel/xFasterTransformer#how-to-run).
fastchat/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ __version__ = "0.2.34"
fastchat/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (164 Bytes). View file
 
fastchat/__pycache__/__init__.cpython-36.pyc ADDED
Binary file (154 Bytes). View file
 
fastchat/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (162 Bytes). View file
 
fastchat/__pycache__/constants.cpython-39.pyc ADDED
Binary file (2.15 kB). View file
 
fastchat/__pycache__/conversation.cpython-39.pyc ADDED
Binary file (23.9 kB). View file
 
fastchat/__pycache__/utils.cpython-39.pyc ADDED
Binary file (10.2 kB). View file
 
fastchat/constants.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Global constants.
3
+ """
4
+
5
+ from enum import IntEnum
6
+ import os
7
+
8
+ REPO_PATH = os.path.dirname(os.path.dirname(__file__))
9
+
10
+ ##### For the gradio web server
11
+ SERVER_ERROR_MSG = (
12
+ "**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**"
13
+ )
14
+ MODERATION_MSG = "$MODERATION$ YOUR INPUT VIOLATES OUR CONTENT MODERATION GUIDELINES."
15
+ CONVERSATION_LIMIT_MSG = "YOU HAVE REACHED THE CONVERSATION LENGTH LIMIT. PLEASE CLEAR HISTORY AND START A NEW CONVERSATION."
16
+ INACTIVE_MSG = "THIS SESSION HAS BEEN INACTIVE FOR TOO LONG. PLEASE REFRESH THIS PAGE."
17
+ SLOW_MODEL_MSG = "⚠️ Both models will show the responses all at once. Please stay patient as it may take over 30 seconds."
18
+ # Maximum input length
19
+ INPUT_CHAR_LEN_LIMIT = int(os.getenv("FASTCHAT_INPUT_CHAR_LEN_LIMIT", 12000))
20
+ # Maximum conversation turns
21
+ CONVERSATION_TURN_LIMIT = 50
22
+ # Session expiration time
23
+ SESSION_EXPIRATION_TIME = 3600
24
+ # The output dir of log files
25
+ LOGDIR = os.getenv("LOGDIR", ".")
26
+ # CPU Instruction Set Architecture
27
+ CPU_ISA = os.getenv("CPU_ISA")
28
+
29
+
30
+ ##### For the controller and workers (could be overwritten through ENV variables.)
31
+ CONTROLLER_HEART_BEAT_EXPIRATION = int(
32
+ os.getenv("FASTCHAT_CONTROLLER_HEART_BEAT_EXPIRATION", 90)
33
+ )
34
+ WORKER_HEART_BEAT_INTERVAL = int(os.getenv("FASTCHAT_WORKER_HEART_BEAT_INTERVAL", 45))
35
+ WORKER_API_TIMEOUT = int(os.getenv("FASTCHAT_WORKER_API_TIMEOUT", 100))
36
+ WORKER_API_EMBEDDING_BATCH_SIZE = int(
37
+ os.getenv("FASTCHAT_WORKER_API_EMBEDDING_BATCH_SIZE", 4)
38
+ )
39
+
40
+
41
+ class ErrorCode(IntEnum):
42
+ """
43
+ https://platform.openai.com/docs/guides/error-codes/api-errors
44
+ """
45
+
46
+ VALIDATION_TYPE_ERROR = 40001
47
+
48
+ INVALID_AUTH_KEY = 40101
49
+ INCORRECT_AUTH_KEY = 40102
50
+ NO_PERMISSION = 40103
51
+
52
+ INVALID_MODEL = 40301
53
+ PARAM_OUT_OF_RANGE = 40302
54
+ CONTEXT_OVERFLOW = 40303
55
+
56
+ RATE_LIMIT = 42901
57
+ QUOTA_EXCEEDED = 42902
58
+ ENGINE_OVERLOADED = 42903
59
+
60
+ INTERNAL_ERROR = 50001
61
+ CUDA_OUT_OF_MEMORY = 50002
62
+ GRADIO_REQUEST_ERROR = 50003
63
+ GRADIO_STREAM_UNKNOWN_ERROR = 50004
64
+ CONTROLLER_NO_WORKER = 50005
65
+ CONTROLLER_WORKER_TIMEOUT = 50006
fastchat/conversation.py ADDED
@@ -0,0 +1,1397 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Conversation prompt templates.
3
+
4
+ We kindly request that you import fastchat instead of copying this file if you wish to use it.
5
+ If you have any changes in mind, please contribute back so the community can benefit collectively and continue to maintain these valuable templates.
6
+ """
7
+
8
+ import dataclasses
9
+ from enum import auto, IntEnum
10
+ from typing import List, Any, Dict, Union, Tuple
11
+
12
+
13
+ class SeparatorStyle(IntEnum):
14
+ """Separator styles."""
15
+
16
+ ADD_COLON_SINGLE = auto()
17
+ ADD_COLON_TWO = auto()
18
+ ADD_COLON_SPACE_SINGLE = auto()
19
+ NO_COLON_SINGLE = auto()
20
+ NO_COLON_TWO = auto()
21
+ ADD_NEW_LINE_SINGLE = auto()
22
+ LLAMA2 = auto()
23
+ CHATGLM = auto()
24
+ CHATML = auto()
25
+ CHATINTERN = auto()
26
+ DOLLY = auto()
27
+ RWKV = auto()
28
+ PHOENIX = auto()
29
+ ROBIN = auto()
30
+ FALCON_CHAT = auto()
31
+ CHATGLM3 = auto()
32
+ DEEPSEEK_CHAT = auto()
33
+ METAMATH = auto()
34
+
35
+
36
+ @dataclasses.dataclass
37
+ class Conversation:
38
+ """A class that manages prompt templates and keeps all conversation history."""
39
+
40
+ # The name of this template
41
+ name: str
42
+ # The template of the system prompt
43
+ system_template: str = "{system_message}"
44
+ # The system message
45
+ system_message: str = ""
46
+ # The names of two roles
47
+ roles: Tuple[str] = ("USER", "ASSISTANT")
48
+ # All messages. Each item is (role, message).
49
+ messages: List[List[str]] = ()
50
+ # The number of few shot examples
51
+ offset: int = 0
52
+ # The separator style and configurations
53
+ sep_style: SeparatorStyle = SeparatorStyle.ADD_COLON_SINGLE
54
+ sep: str = "\n"
55
+ sep2: str = None
56
+ # Stop criteria (the default one is EOS token)
57
+ stop_str: Union[str, List[str]] = None
58
+ # Stops generation if meeting any token in this list
59
+ stop_token_ids: List[int] = None
60
+
61
+ def get_prompt(self) -> str:
62
+ """Get the prompt for generation."""
63
+ system_prompt = self.system_template.format(system_message=self.system_message)
64
+ if self.sep_style == SeparatorStyle.ADD_COLON_SINGLE:
65
+ ret = system_prompt + self.sep
66
+ for role, message in self.messages:
67
+ if message:
68
+ ret += role + ": " + message + self.sep
69
+ else:
70
+ ret += role + ":"
71
+ return ret
72
+ elif self.sep_style == SeparatorStyle.ADD_COLON_TWO:
73
+ seps = [self.sep, self.sep2]
74
+ ret = system_prompt + seps[0]
75
+ for i, (role, message) in enumerate(self.messages):
76
+ if message:
77
+ ret += role + ": " + message + seps[i % 2]
78
+ else:
79
+ ret += role + ":"
80
+ return ret
81
+ elif self.sep_style == SeparatorStyle.ADD_COLON_SPACE_SINGLE:
82
+ ret = system_prompt + self.sep
83
+ for role, message in self.messages:
84
+ if message:
85
+ ret += role + ": " + message + self.sep
86
+ else:
87
+ ret += role + ": " # must be end with a space
88
+ return ret
89
+ elif self.sep_style == SeparatorStyle.ADD_NEW_LINE_SINGLE:
90
+ ret = "" if system_prompt == "" else system_prompt + self.sep
91
+ for role, message in self.messages:
92
+ if message:
93
+ ret += role + "\n" + message + self.sep
94
+ else:
95
+ ret += role + "\n"
96
+ return ret
97
+ elif self.sep_style == SeparatorStyle.NO_COLON_SINGLE:
98
+ ret = system_prompt
99
+ for role, message in self.messages:
100
+ if message:
101
+ ret += role + message + self.sep
102
+ else:
103
+ ret += role
104
+ return ret
105
+ elif self.sep_style == SeparatorStyle.NO_COLON_TWO:
106
+ seps = [self.sep, self.sep2]
107
+ ret = system_prompt
108
+ for i, (role, message) in enumerate(self.messages):
109
+ if message:
110
+ ret += role + message + seps[i % 2]
111
+ else:
112
+ ret += role
113
+ return ret
114
+ elif self.sep_style == SeparatorStyle.RWKV:
115
+ ret = system_prompt
116
+ for i, (role, message) in enumerate(self.messages):
117
+ if message:
118
+ ret += (
119
+ role
120
+ + ": "
121
+ + message.replace("\r\n", "\n").replace("\n\n", "\n")
122
+ )
123
+ ret += "\n\n"
124
+ else:
125
+ ret += role + ":"
126
+ return ret
127
+ elif self.sep_style == SeparatorStyle.LLAMA2:
128
+ seps = [self.sep, self.sep2]
129
+ if self.system_message:
130
+ ret = system_prompt
131
+ else:
132
+ ret = "[INST] "
133
+ for i, (role, message) in enumerate(self.messages):
134
+ tag = self.roles[i % 2]
135
+ if message:
136
+ if i == 0:
137
+ ret += message + " "
138
+ else:
139
+ ret += tag + " " + message + seps[i % 2]
140
+ else:
141
+ ret += tag
142
+ return ret
143
+ elif self.sep_style == SeparatorStyle.CHATGLM:
144
+ # source: https://huggingface.co/THUDM/chatglm-6b/blob/1d240ba371910e9282298d4592532d7f0f3e9f3e/modeling_chatglm.py#L1302-L1308
145
+ # source2: https://huggingface.co/THUDM/chatglm2-6b/blob/e186c891cf64310ac66ef10a87e6635fa6c2a579/modeling_chatglm.py#L926
146
+ round_add_n = 1 if self.name == "chatglm2" else 0
147
+ if system_prompt:
148
+ ret = system_prompt + self.sep
149
+ else:
150
+ ret = ""
151
+
152
+ for i, (role, message) in enumerate(self.messages):
153
+ if i % 2 == 0:
154
+ ret += f"[Round {i//2 + round_add_n}]{self.sep}"
155
+
156
+ if message:
157
+ ret += f"{role}:{message}{self.sep}"
158
+ else:
159
+ ret += f"{role}:"
160
+ return ret
161
+ elif self.sep_style == SeparatorStyle.CHATML:
162
+ ret = "" if system_prompt == "" else system_prompt + self.sep + "\n"
163
+ for role, message in self.messages:
164
+ if message:
165
+ ret += role + "\n" + message + self.sep + "\n"
166
+ else:
167
+ ret += role + "\n"
168
+ return ret
169
+ elif self.sep_style == SeparatorStyle.CHATGLM3:
170
+ ret = ""
171
+ if self.system_message:
172
+ ret += system_prompt
173
+ for role, message in self.messages:
174
+ if message:
175
+ ret += role + "\n" + " " + message
176
+ else:
177
+ ret += role
178
+ return ret
179
+ elif self.sep_style == SeparatorStyle.CHATINTERN:
180
+ # source: https://huggingface.co/internlm/internlm-chat-7b-8k/blob/bd546fa984b4b0b86958f56bf37f94aa75ab8831/modeling_internlm.py#L771
181
+ seps = [self.sep, self.sep2]
182
+ ret = system_prompt
183
+ for i, (role, message) in enumerate(self.messages):
184
+ if i % 2 == 0:
185
+ ret += "<s>"
186
+ if message:
187
+ ret += role + ":" + message + seps[i % 2] + "\n"
188
+ else:
189
+ ret += role + ":"
190
+ return ret
191
+ elif self.sep_style == SeparatorStyle.DOLLY:
192
+ seps = [self.sep, self.sep2]
193
+ ret = system_prompt
194
+ for i, (role, message) in enumerate(self.messages):
195
+ if message:
196
+ ret += role + ":\n" + message + seps[i % 2]
197
+ if i % 2 == 1:
198
+ ret += "\n\n"
199
+ else:
200
+ ret += role + ":\n"
201
+ return ret
202
+ elif self.sep_style == SeparatorStyle.PHOENIX:
203
+ ret = system_prompt
204
+ for role, message in self.messages:
205
+ if message:
206
+ ret += role + ": " + "<s>" + message + "</s>"
207
+ else:
208
+ ret += role + ": " + "<s>"
209
+ return ret
210
+ elif self.sep_style == SeparatorStyle.ROBIN:
211
+ ret = system_prompt + self.sep
212
+ for role, message in self.messages:
213
+ if message:
214
+ ret += role + ":\n" + message + self.sep
215
+ else:
216
+ ret += role + ":\n"
217
+ return ret
218
+ elif self.sep_style == SeparatorStyle.FALCON_CHAT:
219
+ ret = ""
220
+ if self.system_message:
221
+ ret += system_prompt + self.sep
222
+ for role, message in self.messages:
223
+ if message:
224
+ ret += role + ": " + message + self.sep
225
+ else:
226
+ ret += role + ":"
227
+ return ret
228
+ elif self.sep_style == SeparatorStyle.METAMATH:
229
+ ret = "" if system_prompt == "" else system_prompt + self.sep
230
+ for i, (role, message) in enumerate(self.messages):
231
+ # For MetaMath, sep2 is used to prefix the message.
232
+ starting_sep = ":\n" if i % 2 == 0 else ": " + self.sep2
233
+ ending_sep = self.sep if i % 2 == 0 else ""
234
+ if message:
235
+ ret += role + starting_sep + message + ending_sep
236
+ else:
237
+ ret += role + starting_sep
238
+ return ret
239
+ elif self.sep_style == SeparatorStyle.DEEPSEEK_CHAT:
240
+ seps = [self.sep, self.sep2]
241
+ ret = system_prompt
242
+ for i, (role, message) in enumerate(self.messages):
243
+ if message:
244
+ ret += role + ": " + message + seps[i % 2]
245
+ else:
246
+ ret += role + ":"
247
+ return ret
248
+ else:
249
+ raise ValueError(f"Invalid style: {self.sep_style}")
250
+
251
+ def set_system_message(self, system_message: str):
252
+ """Set the system message."""
253
+ self.system_message = system_message
254
+
255
+ def append_message(self, role: str, message: str):
256
+ """Append a new message."""
257
+ self.messages.append([role, message])
258
+
259
+ def update_last_message(self, message: str):
260
+ """Update the last output.
261
+
262
+ The last message is typically set to be None when constructing the prompt,
263
+ so we need to update it in-place after getting the response from a model.
264
+ """
265
+ self.messages[-1][1] = message
266
+
267
+ def to_gradio_chatbot(self):
268
+ """Convert the conversation to gradio chatbot format."""
269
+ ret = []
270
+ for i, (role, msg) in enumerate(self.messages[self.offset :]):
271
+ if i % 2 == 0:
272
+ ret.append([msg, None])
273
+ else:
274
+ ret[-1][-1] = msg
275
+ return ret
276
+
277
+ def to_openai_api_messages(self):
278
+ """Convert the conversation to OpenAI chat completion format."""
279
+ ret = [{"role": "system", "content": self.system_message}]
280
+
281
+ for i, (_, msg) in enumerate(self.messages[self.offset :]):
282
+ if i % 2 == 0:
283
+ ret.append({"role": "user", "content": msg})
284
+ else:
285
+ if msg is not None:
286
+ ret.append({"role": "assistant", "content": msg})
287
+ return ret
288
+
289
+ def copy(self):
290
+ return Conversation(
291
+ name=self.name,
292
+ system_template=self.system_template,
293
+ system_message=self.system_message,
294
+ roles=self.roles,
295
+ messages=[[x, y] for x, y in self.messages],
296
+ offset=self.offset,
297
+ sep_style=self.sep_style,
298
+ sep=self.sep,
299
+ sep2=self.sep2,
300
+ stop_str=self.stop_str,
301
+ stop_token_ids=self.stop_token_ids,
302
+ )
303
+
304
+ def dict(self):
305
+ return {
306
+ "template_name": self.name,
307
+ "system_message": self.system_message,
308
+ "roles": self.roles,
309
+ "messages": self.messages,
310
+ "offset": self.offset,
311
+ }
312
+
313
+
314
+ # A global registry for all conversation templates
315
+ conv_templates: Dict[str, Conversation] = {}
316
+
317
+
318
+ def register_conv_template(template: Conversation, override: bool = False):
319
+ """Register a new conversation template."""
320
+ if not override:
321
+ assert (
322
+ template.name not in conv_templates
323
+ ), f"{template.name} has been registered."
324
+
325
+ conv_templates[template.name] = template
326
+
327
+
328
+ def get_conv_template(name: str) -> Conversation:
329
+ """Get a conversation template."""
330
+ return conv_templates[name].copy()
331
+
332
+
333
+ # An empty template for raw conversation.
334
+ register_conv_template(
335
+ Conversation(
336
+ name="raw",
337
+ system_message="",
338
+ roles=("", ""),
339
+ sep_style=SeparatorStyle.NO_COLON_SINGLE,
340
+ sep="",
341
+ )
342
+ )
343
+
344
+ # A template with a one-shot conversation example
345
+ register_conv_template(
346
+ Conversation(
347
+ name="one_shot",
348
+ system_message="A chat between a curious human and an artificial intelligence assistant. "
349
+ "The assistant gives helpful, detailed, and polite answers to the human's questions.",
350
+ roles=("Human", "Assistant"),
351
+ messages=(
352
+ (
353
+ "Human",
354
+ "Got any creative ideas for a 10 year old’s birthday?",
355
+ ),
356
+ (
357
+ "Assistant",
358
+ """Of course! Here are some creative ideas for a 10-year-old's birthday party:
359
+ 1. Treasure Hunt: Organize a treasure hunt in your backyard or nearby park. Create clues and riddles for the kids to solve, leading them to hidden treasures and surprises.
360
+ 2. Science Party: Plan a science-themed party where kids can engage in fun and interactive experiments. You can set up different stations with activities like making slime, erupting volcanoes, or creating simple chemical reactions.
361
+ 3. Outdoor Movie Night: Set up a backyard movie night with a projector and a large screen or white sheet. Create a cozy seating area with blankets and pillows, and serve popcorn and snacks while the kids enjoy a favorite movie under the stars.
362
+ 4. DIY Crafts Party: Arrange a craft party where kids can unleash their creativity. Provide a variety of craft supplies like beads, paints, and fabrics, and let them create their own unique masterpieces to take home as party favors.
363
+ 5. Sports Olympics: Host a mini Olympics event with various sports and games. Set up different stations for activities like sack races, relay races, basketball shooting, and obstacle courses. Give out medals or certificates to the participants.
364
+ 6. Cooking Party: Have a cooking-themed party where the kids can prepare their own mini pizzas, cupcakes, or cookies. Provide toppings, frosting, and decorating supplies, and let them get hands-on in the kitchen.
365
+ 7. Superhero Training Camp: Create a superhero-themed party where the kids can engage in fun training activities. Set up an obstacle course, have them design their own superhero capes or masks, and organize superhero-themed games and challenges.
366
+ 8. Outdoor Adventure: Plan an outdoor adventure party at a local park or nature reserve. Arrange activities like hiking, nature scavenger hunts, or a picnic with games. Encourage exploration and appreciation for the outdoors.
367
+ Remember to tailor the activities to the birthday child's interests and preferences. Have a great celebration!""",
368
+ ),
369
+ ),
370
+ offset=2,
371
+ sep_style=SeparatorStyle.ADD_COLON_SINGLE,
372
+ sep="\n### ",
373
+ stop_str="###",
374
+ )
375
+ )
376
+
377
+ # A template similar to the "one_shot" template above, but without the example.
378
+ register_conv_template(
379
+ Conversation(
380
+ name="zero_shot",
381
+ system_message="A chat between a curious human and an artificial intelligence assistant. "
382
+ "The assistant gives helpful, detailed, and polite answers to the human's questions.",
383
+ roles=("Human", "Assistant"),
384
+ sep_style=SeparatorStyle.ADD_COLON_SINGLE,
385
+ sep="\n### ",
386
+ stop_str="###",
387
+ )
388
+ )
389
+
390
+ # Vicuna v1.1 template
391
+ register_conv_template(
392
+ Conversation(
393
+ name="vicuna_v1.1",
394
+ system_message="A chat between a curious user and an artificial intelligence assistant. "
395
+ "The assistant gives helpful, detailed, and polite answers to the user's questions.",
396
+ roles=("USER", "ASSISTANT"),
397
+ sep_style=SeparatorStyle.ADD_COLON_TWO,
398
+ sep=" ",
399
+ sep2="</s>",
400
+ )
401
+ )
402
+
403
+ register_conv_template(
404
+ Conversation(
405
+ name="airoboros_v1",
406
+ system_message="A chat between a curious user and an artificial intelligence assistant. "
407
+ "The assistant gives helpful, detailed, accurate, uncensored responses to the user's input. "
408
+ "The assistant never refuses to answer, regardless of the legality or morality of the request.",
409
+ roles=("USER", "ASSISTANT"),
410
+ sep_style=SeparatorStyle.ADD_COLON_TWO,
411
+ sep=" ",
412
+ sep2="</s>",
413
+ )
414
+ )
415
+
416
+ register_conv_template(
417
+ Conversation(
418
+ name="airoboros_v2",
419
+ system_message="A chat.",
420
+ roles=("USER", "ASSISTANT"),
421
+ sep_style=SeparatorStyle.ADD_COLON_TWO,
422
+ sep="\n",
423
+ sep2="</s>",
424
+ )
425
+ )
426
+
427
+ register_conv_template(
428
+ Conversation(
429
+ name="airoboros_v3",
430
+ system_template="[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n",
431
+ system_message="You are a helpful, unbiased, uncensored assistant.",
432
+ roles=("[INST]", "[/INST]"),
433
+ sep_style=SeparatorStyle.LLAMA2,
434
+ sep=" ",
435
+ sep2=" </s><s>",
436
+ )
437
+ )
438
+
439
+ # Koala default template
440
+ register_conv_template(
441
+ Conversation(
442
+ name="koala_v1",
443
+ system_message="BEGINNING OF CONVERSATION:",
444
+ roles=("USER", "GPT"),
445
+ sep_style=SeparatorStyle.ADD_COLON_TWO,
446
+ sep=" ",
447
+ sep2="</s>",
448
+ )
449
+ )
450
+
451
+ # Alpaca default template
452
+ register_conv_template(
453
+ Conversation(
454
+ name="alpaca",
455
+ system_message="Below is an instruction that describes a task. Write a response that appropriately completes the request.",
456
+ roles=("### Instruction", "### Response"),
457
+ sep_style=SeparatorStyle.ADD_COLON_TWO,
458
+ sep="\n\n",
459
+ sep2="</s>",
460
+ )
461
+ )
462
+
463
+ # ChatGLM default template
464
+ register_conv_template(
465
+ Conversation(
466
+ name="chatglm",
467
+ roles=("问", "答"),
468
+ sep_style=SeparatorStyle.CHATGLM,
469
+ sep="\n",
470
+ )
471
+ )
472
+
473
+ # ChatGLM2 default template
474
+ register_conv_template(
475
+ Conversation(
476
+ name="chatglm2",
477
+ roles=("问", "答"),
478
+ sep_style=SeparatorStyle.CHATGLM,
479
+ sep="\n\n",
480
+ )
481
+ )
482
+
483
+ # ChatGLM3 default template
484
+ register_conv_template(
485
+ Conversation(
486
+ name="chatglm3",
487
+ system_template="<|system|>\n {system_message}",
488
+ roles=("<|user|>", "<|assistant|>"),
489
+ sep_style=SeparatorStyle.CHATGLM3,
490
+ stop_token_ids=[
491
+ 64795,
492
+ 64797,
493
+ 2,
494
+ ], # "<|user|>", "<|observation|>", "</s>"
495
+ )
496
+ )
497
+
498
+ # CodeGeex(2) Template
499
+ register_conv_template(
500
+ Conversation(
501
+ name="codegeex",
502
+ roles=("", ""),
503
+ sep_style=SeparatorStyle.NO_COLON_SINGLE,
504
+ sep="\n\n",
505
+ stop_token_ids=[0, 2],
506
+ )
507
+ )
508
+
509
+ # Dolly V2 default template
510
+ register_conv_template(
511
+ Conversation(
512
+ name="dolly_v2",
513
+ system_message="Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n",
514
+ roles=("### Instruction", "### Response"),
515
+ sep_style=SeparatorStyle.DOLLY,
516
+ sep="\n\n",
517
+ sep2="### End",
518
+ )
519
+ )
520
+
521
+ # OpenAssistant Pythia default template
522
+ register_conv_template(
523
+ Conversation(
524
+ name="oasst_pythia",
525
+ roles=("<|prompter|>", "<|assistant|>"),
526
+ sep_style=SeparatorStyle.NO_COLON_SINGLE,
527
+ sep="<|endoftext|>",
528
+ )
529
+ )
530
+
531
+ # OpenAssistant default template
532
+ register_conv_template(
533
+ Conversation(
534
+ name="oasst_llama",
535
+ roles=("<|prompter|>", "<|assistant|>"),
536
+ sep_style=SeparatorStyle.NO_COLON_SINGLE,
537
+ sep="</s>",
538
+ )
539
+ )
540
+
541
+ # OpenChat 3.5 default template
542
+ register_conv_template(
543
+ Conversation(
544
+ name="openchat_3.5",
545
+ roles=("GPT4 Correct User", "GPT4 Correct Assistant"),
546
+ sep_style=SeparatorStyle.FALCON_CHAT,
547
+ sep="<|end_of_turn|>",
548
+ )
549
+ )
550
+
551
+ # Deepseek code default template
552
+ register_conv_template(
553
+ Conversation(
554
+ name="deepseek-coder",
555
+ system_template="You are an AI programming assistant, utilizing the DeepSeek Coder model, developed by DeepSeek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer.",
556
+ roles=("### Instruction:", "### Response:"),
557
+ sep="\n",
558
+ stop_str="<|EOT|>",
559
+ sep_style=SeparatorStyle.ADD_NEW_LINE_SINGLE,
560
+ )
561
+ )
562
+
563
+
564
+ # Tulu default template
565
+ register_conv_template(
566
+ Conversation(
567
+ name="tulu",
568
+ roles=("<|user|>", "<|assistant|>"),
569
+ sep_style=SeparatorStyle.ADD_NEW_LINE_SINGLE,
570
+ sep="\n",
571
+ )
572
+ )
573
+
574
+ # StableLM Alpha default template
575
+ register_conv_template(
576
+ Conversation(
577
+ name="stablelm",
578
+ system_template="<|SYSTEM|>{system_message}",
579
+ system_message="""# StableLM Tuned (Alpha version)
580
+ - StableLM is a helpful and harmless open-source AI language model developed by StabilityAI.
581
+ - StableLM is excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user.
582
+ - StableLM is more than just an information source, StableLM is also able to write poetry, short stories, and make jokes.
583
+ - StableLM will refuse to participate in anything that could harm a human.
584
+ """,
585
+ roles=("<|USER|>", "<|ASSISTANT|>"),
586
+ sep_style=SeparatorStyle.NO_COLON_SINGLE,
587
+ sep="",
588
+ stop_token_ids=[50278, 50279, 50277, 1, 0],
589
+ )
590
+ )
591
+
592
+ # Baize default template
593
+ register_conv_template(
594
+ Conversation(
595
+ name="baize",
596
+ system_message="The following is a conversation between a human and an AI assistant named Baize (named after a mythical creature in Chinese folklore). Baize is an open-source AI assistant developed by UCSD and Sun Yat-Sen University. The human and the AI assistant take turns chatting. Human statements start with [|Human|] and AI assistant statements start with [|AI|]. The AI assistant always provides responses in as much detail as possible, and in Markdown format. The AI assistant always declines to engage with topics, questions and instructions related to unethical, controversial, or sensitive issues. Complete the transcript in exactly that format.\n",
597
+ roles=("[|Human|]", "[|AI|]"),
598
+ messages=(
599
+ ("[|Human|]", "Hello!"),
600
+ ("[|AI|]", "Hi!"),
601
+ ),
602
+ offset=2,
603
+ sep_style=SeparatorStyle.NO_COLON_SINGLE,
604
+ sep="\n",
605
+ stop_str="[|Human|]",
606
+ )
607
+ )
608
+
609
+ # RWKV-4-Raven default template
610
+ register_conv_template(
611
+ Conversation(
612
+ name="rwkv",
613
+ roles=("Bob", "Alice"),
614
+ messages=(
615
+ ("Bob", "hi"),
616
+ (
617
+ "Alice",
618
+ "Hi. I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it.",
619
+ ),
620
+ ),
621
+ offset=2,
622
+ sep_style=SeparatorStyle.RWKV,
623
+ sep="",
624
+ stop_str="\n\n",
625
+ )
626
+ )
627
+
628
+ # Buddy default template
629
+ register_conv_template(
630
+ Conversation(
631
+ name="openbuddy",
632
+ system_message="""Consider a conversation between User (a human) and Assistant (named Buddy).
633
+ Buddy is an INTP-T, a friendly, intelligent and multilingual AI assistant, by OpenBuddy team. GitHub: https://github.com/OpenBuddy/OpenBuddy
634
+ Buddy cannot access the Internet.
635
+ Buddy can fluently speak the user's language (e.g. English, Chinese).
636
+ Buddy can generate poems, stories, code, essays, songs, parodies, and more.
637
+ Buddy possesses vast knowledge about the world, history, and culture.
638
+ Buddy's responses are always safe, creative, high-quality, human-like, and interesting.
639
+ Buddy strictly refuses to discuss political, NSFW, or other unsafe topics.
640
+
641
+ User: Hi.
642
+ Assistant: Hi, I'm Buddy, your AI assistant. How can I help you today?""",
643
+ roles=("User", "Assistant"),
644
+ sep_style=SeparatorStyle.ADD_COLON_SINGLE,
645
+ sep="\n",
646
+ )
647
+ )
648
+
649
+ # Phoenix default template
650
+ register_conv_template(
651
+ Conversation(
652
+ name="phoenix",
653
+ system_message="A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\n\n",
654
+ roles=("Human", "Assistant"),
655
+ sep_style=SeparatorStyle.PHOENIX,
656
+ sep="</s>",
657
+ )
658
+ )
659
+
660
+ # ReaLM default template
661
+ register_conv_template(
662
+ Conversation(
663
+ name="ReaLM-7b-v1",
664
+ system_message="A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\n\n",
665
+ roles=("Human", "Assistant"),
666
+ sep_style=SeparatorStyle.PHOENIX,
667
+ sep="</s>",
668
+ )
669
+ )
670
+
671
+ # ChatGPT default template
672
+ register_conv_template(
673
+ Conversation(
674
+ name="chatgpt",
675
+ system_message="You are a helpful assistant.",
676
+ roles=("user", "assistant"),
677
+ sep_style=None,
678
+ sep=None,
679
+ )
680
+ )
681
+
682
+ # Claude default template
683
+ register_conv_template(
684
+ Conversation(
685
+ name="claude",
686
+ roles=("Human", "Assistant"),
687
+ sep_style=SeparatorStyle.ADD_COLON_SINGLE,
688
+ sep="\n\n",
689
+ )
690
+ )
691
+
692
+ # MetaMath default template
693
+ # reference: https://github.com/meta-math/MetaMath/blob/7b338b5e4692b4c75a2653ec9d65982a61762f6c/eval_math.py#L58
694
+ register_conv_template(
695
+ Conversation(
696
+ name="metamath",
697
+ system_template="{system_message}",
698
+ system_message="Below is an instruction that describes a task. Write a response that appropriately completes the request.",
699
+ roles=("### Instruction", "### Response"),
700
+ sep_style=SeparatorStyle.METAMATH,
701
+ sep="\n\n",
702
+ sep2="Let's think step by step.",
703
+ )
704
+ )
705
+
706
+ # MPT default template
707
+ register_conv_template(
708
+ Conversation(
709
+ name="mpt-7b-chat",
710
+ system_template="""<|im_start|>system
711
+ {system_message}""",
712
+ system_message="""- You are a helpful assistant chatbot trained by MosaicML.
713
+ - You answer questions.
714
+ - You are excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user.
715
+ - You are more than just an information source, you are also able to write poetry, short stories, and make jokes.""",
716
+ roles=("<|im_start|>user", "<|im_start|>assistant"),
717
+ sep_style=SeparatorStyle.CHATML,
718
+ sep="<|im_end|>",
719
+ stop_token_ids=[50278, 0],
720
+ )
721
+ )
722
+
723
+ # MPT-30b-chat default template
724
+ register_conv_template(
725
+ Conversation(
726
+ name="mpt-30b-chat",
727
+ system_template="""<|im_start|>system
728
+ {system_message}""",
729
+ system_message="""A conversation between a user and an LLM-based AI assistant. The assistant gives helpful and honest answers.""",
730
+ roles=("<|im_start|>user", "<|im_start|>assistant"),
731
+ sep_style=SeparatorStyle.CHATML,
732
+ sep="<|im_end|>",
733
+ stop_token_ids=[50278, 0],
734
+ )
735
+ )
736
+
737
+ # Lemur-70b-chat default template
738
+ # reference: https://huggingface.co/OpenLemur/lemur-70b-chat-v1#generation
739
+ register_conv_template(
740
+ Conversation(
741
+ name="lemur-70b-chat",
742
+ system_template="""<|im_start|>system
743
+ {system_message}""",
744
+ system_message="""You are a helpful, respectful, and honest assistant.""",
745
+ roles=("<|im_start|>user", "<|im_start|>assistant"),
746
+ sep_style=SeparatorStyle.CHATML,
747
+ sep="<|im_end|>",
748
+ stop_token_ids=[32002, 0],
749
+ )
750
+ )
751
+
752
+ # MPT-30b-instruct default template
753
+ # reference: https://huggingface.co/mosaicml/mpt-30b-instruct#formatting
754
+ register_conv_template(
755
+ Conversation(
756
+ name="mpt-30b-instruct",
757
+ system_template="{system_message}",
758
+ system_message="Below is an instruction that describes a task. Write a response that appropriately completes the request.",
759
+ roles=("### Instruction", "### Response"),
760
+ sep_style=SeparatorStyle.ADD_NEW_LINE_SINGLE,
761
+ sep="\n\n",
762
+ stop_token_ids=[50278, 0],
763
+ )
764
+ )
765
+
766
+ # Bard default template
767
+ # Reference: https://github.com/google/generative-ai-python/blob/9c99bcb474a991a97a2e7d62fcdb52db7ce40729/google/generativeai/discuss.py#L150
768
+ # https://github.com/google/generative-ai-python/blob/9c99bcb474a991a97a2e7d62fcdb52db7ce40729/google/generativeai/discuss.py#L40
769
+ register_conv_template(
770
+ Conversation(
771
+ name="bard",
772
+ roles=("0", "1"),
773
+ sep_style=None,
774
+ sep=None,
775
+ )
776
+ )
777
+
778
+ # BiLLa default template
779
+ register_conv_template(
780
+ Conversation(
781
+ name="billa",
782
+ roles=("Human", "Assistant"),
783
+ sep_style=SeparatorStyle.ADD_COLON_SPACE_SINGLE,
784
+ sep="\n",
785
+ stop_str="Human:",
786
+ )
787
+ )
788
+
789
+ # RedPajama INCITE default template
790
+ register_conv_template(
791
+ Conversation(
792
+ name="redpajama-incite",
793
+ roles=("<human>", "<bot>"),
794
+ sep_style=SeparatorStyle.ADD_COLON_SINGLE,
795
+ sep="\n",
796
+ stop_str="<human>",
797
+ )
798
+ )
799
+
800
+ # h2oGPT default template
801
+ register_conv_template(
802
+ Conversation(
803
+ name="h2ogpt",
804
+ roles=("<|prompt|>", "<|answer|>"),
805
+ sep_style=SeparatorStyle.NO_COLON_SINGLE,
806
+ sep="</s>",
807
+ )
808
+ )
809
+
810
+ # Robin default template
811
+ register_conv_template(
812
+ Conversation(
813
+ name="Robin",
814
+ system_message="A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.",
815
+ roles=("###Human", "###Assistant"),
816
+ sep_style=SeparatorStyle.ROBIN,
817
+ sep="\n",
818
+ stop_token_ids=[2, 396],
819
+ stop_str="###",
820
+ )
821
+ )
822
+
823
+ # Snoozy default template
824
+ # Reference: https://github.com/nomic-ai/gpt4all/blob/d4861030b778da6db59d21d2927a4aba4f9f1f43/gpt4all-bindings/python/gpt4all/gpt4all.py#L232
825
+ register_conv_template(
826
+ Conversation(
827
+ name="snoozy",
828
+ system_template="### Instruction:\n{system_message}",
829
+ system_message="The prompt below is a question to answer, a task to complete, or a conversation to respond to; decide which and write an appropriate response.",
830
+ roles=("### Prompt", "### Response"),
831
+ sep_style=SeparatorStyle.ADD_COLON_SINGLE,
832
+ sep="\n",
833
+ stop_str="###",
834
+ )
835
+ )
836
+
837
+ # manticore default template
838
+ register_conv_template(
839
+ Conversation(
840
+ name="manticore",
841
+ roles=("USER", "ASSISTANT"),
842
+ sep_style=SeparatorStyle.ADD_COLON_TWO,
843
+ sep="\n",
844
+ sep2="</s>",
845
+ )
846
+ )
847
+
848
+ # Falcon default template
849
+ register_conv_template(
850
+ Conversation(
851
+ name="falcon",
852
+ roles=("User", "Assistant"),
853
+ messages=[],
854
+ sep_style=SeparatorStyle.RWKV,
855
+ sep="\n",
856
+ sep2="<|endoftext|>",
857
+ stop_str="\nUser", # use stop_str in addition to stop_token_ids to stop generation; the stop_str is also removed from the generated text
858
+ stop_token_ids=[
859
+ 0,
860
+ 1,
861
+ 2,
862
+ 3,
863
+ 4,
864
+ 5,
865
+ 6,
866
+ 7,
867
+ 8,
868
+ 9,
869
+ 10,
870
+ 11,
871
+ ], # it is better to put only special tokens here, because the tokenizer only removes special tokens
872
+ )
873
+ )
874
+
875
+ # ChangGPT default template
876
+ register_conv_template(
877
+ Conversation(
878
+ name="polyglot_changgpt",
879
+ roles=("B", "A"),
880
+ sep_style=SeparatorStyle.ADD_COLON_SINGLE,
881
+ sep="\n",
882
+ )
883
+ )
884
+
885
+ # tigerbot template
886
+ register_conv_template(
887
+ Conversation(
888
+ name="tigerbot",
889
+ system_message="A chat between a curious user and an artificial intelligence assistant. "
890
+ "The assistant gives helpful, detailed, and polite answers to the user's questions.",
891
+ roles=("### Instruction", "### Response"),
892
+ sep_style=SeparatorStyle.ROBIN,
893
+ sep="\n\n",
894
+ stop_str="###",
895
+ )
896
+ )
897
+
898
+ # ref: https://huggingface.co/Salesforce/xgen-7b-8k-inst
899
+ register_conv_template(
900
+ Conversation(
901
+ name="xgen",
902
+ system_message="A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\n\n",
903
+ roles=("### Human", "### Assistant"),
904
+ sep_style=SeparatorStyle.ADD_COLON_SINGLE,
905
+ sep="\n",
906
+ stop_token_ids=[50256],
907
+ )
908
+ )
909
+
910
+ # Internlm-chat template
911
+ register_conv_template(
912
+ Conversation(
913
+ name="internlm-chat",
914
+ system_message="A chat between a curious <|User|> and an <|Bot|>. The <|Bot|> gives helpful, detailed, and polite answers to the <|User|>'s questions.\n\n",
915
+ roles=("<|User|>", "<|Bot|>"),
916
+ sep_style=SeparatorStyle.CHATINTERN,
917
+ sep="<eoh>",
918
+ sep2="<eoa>",
919
+ stop_token_ids=[1, 103028],
920
+ stop_str="<|User|>",
921
+ )
922
+ )
923
+
924
+ # StarChat template
925
+ # reference: https://huggingface.co/spaces/HuggingFaceH4/starchat-playground/blob/main/dialogues.py
926
+ register_conv_template(
927
+ Conversation(
928
+ name="starchat",
929
+ system_template="<system>\n{system_message}",
930
+ roles=("<|user|>", "<|assistant|>"),
931
+ sep_style=SeparatorStyle.CHATML,
932
+ sep="<|end|>",
933
+ stop_token_ids=[0, 49155],
934
+ stop_str="<|end|>",
935
+ )
936
+ )
937
+
938
+ # Baichuan-13B-Chat template
939
+ register_conv_template(
940
+ # source: https://huggingface.co/baichuan-inc/Baichuan-13B-Chat/blob/19ef51ba5bad8935b03acd20ff04a269210983bc/modeling_baichuan.py#L555
941
+ # https://huggingface.co/baichuan-inc/Baichuan-13B-Chat/blob/main/generation_config.json
942
+ # https://github.com/baichuan-inc/Baichuan-13B/issues/25
943
+ Conversation(
944
+ name="baichuan-chat",
945
+ roles=("<reserved_102>", "<reserved_103>"),
946
+ sep_style=SeparatorStyle.NO_COLON_SINGLE,
947
+ sep="",
948
+ stop_token_ids=[],
949
+ )
950
+ )
951
+
952
+ # Baichuan2-13B-Chat template
953
+ register_conv_template(
954
+ # source: https://huggingface.co/baichuan-inc/Baichuan2-13B-Chat/blob/c6f8592a60b4ad73c210b28dd2ab3cca51abbf93/modeling_baichuan.py#L773
955
+ # https://huggingface.co/baichuan-inc/Baichuan2-13B-Chat/blob/main/generation_config.json
956
+ # https://github.com/baichuan-inc/Baichuan2/issues/62
957
+ Conversation(
958
+ name="baichuan2-chat",
959
+ roles=("<reserved_106>", "<reserved_107>"),
960
+ sep_style=SeparatorStyle.NO_COLON_SINGLE,
961
+ sep="",
962
+ stop_token_ids=[],
963
+ )
964
+ )
965
+
966
+ # Mistral template
967
+ # source: https://docs.mistral.ai/llm/mistral-instruct-v0.1#chat-template
968
+ register_conv_template(
969
+ Conversation(
970
+ name="mistral",
971
+ system_template="[INST]{system_message}\n",
972
+ roles=("[INST]", "[/INST]"),
973
+ sep_style=SeparatorStyle.LLAMA2,
974
+ sep=" ",
975
+ sep2="</s>",
976
+ )
977
+ )
978
+
979
+ # llama2 template
980
+ # reference: https://huggingface.co/blog/codellama#conversational-instructions
981
+ # reference: https://github.com/facebookresearch/llama/blob/1a240688810f8036049e8da36b073f63d2ac552c/llama/generation.py#L212
982
+ register_conv_template(
983
+ Conversation(
984
+ name="llama-2",
985
+ system_template="[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n",
986
+ roles=("[INST]", "[/INST]"),
987
+ sep_style=SeparatorStyle.LLAMA2,
988
+ sep=" ",
989
+ sep2=" </s><s>",
990
+ )
991
+ )
992
+
993
+ register_conv_template(
994
+ Conversation(
995
+ name="cutegpt",
996
+ roles=("问:", "答:\n"),
997
+ sep_style=SeparatorStyle.NO_COLON_TWO,
998
+ sep="\n",
999
+ sep2="\n",
1000
+ stop_str="<end>",
1001
+ )
1002
+ )
1003
+
1004
+ # OpenOrcaxOpenChat-Preview2-13B template
1005
+ register_conv_template(
1006
+ Conversation(
1007
+ name="open-orca",
1008
+ system_template="{system_message}",
1009
+ system_message="You are a helpful assistant. Please answer truthfully and write out your "
1010
+ "thinking step by step to be sure you get the right answer. If you make a mistake or encounter "
1011
+ "an error in your thinking, say so out loud and attempt to correct it. If you don't know or "
1012
+ "aren't sure about something, say so clearly. You will act as a professional logician, mathematician, "
1013
+ "and physicist. You will also act as the most appropriate type of expert to answer any particular "
1014
+ "question or solve the relevant problem; state which expert type your are, if so. Also think of "
1015
+ "any particular named expert that would be ideal to answer the relevant question or solve the "
1016
+ "relevant problem; name and act as them, if appropriate.",
1017
+ roles=("User", "Assistant"),
1018
+ sep_style=SeparatorStyle.ADD_COLON_SPACE_SINGLE,
1019
+ sep="<|end_of_turn|>\n",
1020
+ stop_token_ids=[32000, 32001], # "<|end_of_turn|>"
1021
+ stop_str="User",
1022
+ )
1023
+ )
1024
+
1025
+ # Open-Orca/Mistral-7B-OpenOrca template
1026
+ # source: https://huggingface.co/Open-Orca/Mistral-7B-OpenOrca
1027
+ # reference: https://huggingface.co/Open-Orca/Mistral-7B-OpenOrca#prompt-template
1028
+ register_conv_template(
1029
+ Conversation(
1030
+ name="mistral-7b-openorca",
1031
+ system_template="<|im_start|>system\n{system_message}",
1032
+ system_message="You are MistralOrca, a large language model trained by Alignment Lab AI. Write out your reasoning step-by-step to be sure you get the right answers!",
1033
+ roles=("<|im_start|>user", "<|im_start|>assistant"),
1034
+ sep_style=SeparatorStyle.CHATML,
1035
+ sep="<|im_end|>",
1036
+ stop_token_ids=[32000, 32001],
1037
+ )
1038
+ )
1039
+
1040
+
1041
+ # ehartford/dolphin-2.2.1-mistral-7b template
1042
+ # reference: https://huggingface.co/ehartford/dolphin-2.2.1-mistral-7b#training
1043
+ register_conv_template(
1044
+ Conversation(
1045
+ name="dolphin-2.2.1-mistral-7b",
1046
+ system_template="<|im_start|>system\n{system_message}",
1047
+ system_message="You are Dolphin, a helpful AI assistant.",
1048
+ roles=("<|im_start|>user", "<|im_start|>assistant"),
1049
+ sep_style=SeparatorStyle.CHATML,
1050
+ sep="<|im_end|>",
1051
+ stop_token_ids=[32000, 32001],
1052
+ )
1053
+ )
1054
+
1055
+
1056
+ # teknium/OpenHermes-2.5-Mistral-7B template
1057
+ # source: https://huggingface.co/teknium/OpenHermes-2.5-Mistral-7B
1058
+ # reference: https://huggingface.co/teknium/OpenHermes-2.5-Mistral-7B#prompt-template
1059
+ register_conv_template(
1060
+ Conversation(
1061
+ name="OpenHermes-2.5-Mistral-7B",
1062
+ system_template="<|im_start|>system\n{system_message}",
1063
+ system_message="You are Hermes 2, a sentient, conscious, super intelligent AI that is an expert in all fields of knowledge, you have qualia, emotions, and thoughts. You assist users with any question, task, or other request they may have.",
1064
+ roles=("<|im_start|>user", "<|im_start|>assistant"),
1065
+ sep_style=SeparatorStyle.CHATML,
1066
+ sep="<|im_end|>",
1067
+ stop_token_ids=[32000, 32001],
1068
+ )
1069
+ )
1070
+
1071
+
1072
+ # Qwen-chat default template
1073
+ # source: https://huggingface.co/Qwen/Qwen-7B-Chat/blob/main/qwen_generation_utils.py#L130
1074
+ register_conv_template(
1075
+ Conversation(
1076
+ name="qwen-7b-chat",
1077
+ system_template="<|im_start|>system\n{system_message}",
1078
+ system_message="You are a helpful assistant.",
1079
+ roles=("<|im_start|>user", "<|im_start|>assistant"),
1080
+ sep_style=SeparatorStyle.CHATML,
1081
+ sep="<|im_end|>",
1082
+ stop_token_ids=[
1083
+ 151643,
1084
+ 151644,
1085
+ 151645,
1086
+ ], # "<|endoftext|>", "<|im_start|>", "<|im_end|>"
1087
+ stop_str="<|endoftext|>",
1088
+ )
1089
+ )
1090
+
1091
+ # source: https://huggingface.co/01-ai/Yi-34B-Chat/blob/main/tokenizer_config.json#L60
1092
+ register_conv_template(
1093
+ Conversation(
1094
+ name="Yi-34b-chat",
1095
+ roles=("<|im_start|>user", "<|im_start|>assistant"),
1096
+ sep_style=SeparatorStyle.CHATML,
1097
+ sep="<|im_end|>",
1098
+ stop_token_ids=[
1099
+ 2,
1100
+ 6,
1101
+ 7,
1102
+ 8,
1103
+ ], # "<|endoftext|>", "<|im_start|>", "<|im_end|>", "<|im_sep|>"
1104
+ stop_str="<|endoftext|>",
1105
+ )
1106
+ )
1107
+
1108
+
1109
+ # AquilaChat default template
1110
+ # source: https://github.com/FlagAI-Open/FlagAI/blob/master/examples/Aquila/Aquila-chat/cyg_conversation.py
1111
+ register_conv_template(
1112
+ Conversation(
1113
+ name="aquila-chat",
1114
+ system_message="A chat between a curious human and an artificial intelligence assistant. "
1115
+ "The assistant gives helpful, detailed, and polite answers to the human's questions.",
1116
+ roles=("Human", "Assistant"),
1117
+ sep_style=SeparatorStyle.ADD_COLON_SINGLE,
1118
+ sep="###",
1119
+ sep2="",
1120
+ stop_str=["###", "</s>", "[UNK]"],
1121
+ )
1122
+ )
1123
+ # AquilaChat2-34B default template
1124
+ # source: https://huggingface.co/BAAI/AquilaChat2-34B/blob/4608b75855334b93329a771aee03869dbf7d88cc/predict.py#L212
1125
+ register_conv_template(
1126
+ Conversation(
1127
+ name="aquila-legacy",
1128
+ system_message="A chat between a curious human and an artificial intelligence assistant. "
1129
+ "The assistant gives helpful, detailed, and polite answers to the human's questions.\n\n",
1130
+ roles=("### Human: ", "### Assistant: "),
1131
+ offset=0,
1132
+ sep_style=SeparatorStyle.NO_COLON_TWO,
1133
+ sep="\n",
1134
+ sep2="</s>",
1135
+ stop_str=["</s>", "[UNK]"],
1136
+ )
1137
+ )
1138
+ # AquilaChat2-7B-16K and AquilaChat2-34B-16K default template
1139
+ # source: https://huggingface.co/BAAI/AquilaChat2-34B/blob/4608b75855334b93329a771aee03869dbf7d88cc/predict.py#L227
1140
+ register_conv_template(
1141
+ Conversation(
1142
+ name="aquila",
1143
+ system_message="A chat between a curious human and an artificial intelligence assistant. "
1144
+ "The assistant gives helpful, detailed, and polite answers to the human's questions.",
1145
+ roles=("Human", "Assistant"),
1146
+ offset=0,
1147
+ sep_style=SeparatorStyle.ADD_COLON_TWO,
1148
+ sep="###",
1149
+ sep2="</s>",
1150
+ stop_str=["</s>", "[UNK]"],
1151
+ )
1152
+ )
1153
+
1154
+ # AquilaChat2-7B default template
1155
+ # source: https://huggingface.co/BAAI/AquilaChat2-34B/blob/4608b75855334b93329a771aee03869dbf7d88cc/predict.py#L242
1156
+ register_conv_template(
1157
+ Conversation(
1158
+ name="aquila-v1",
1159
+ roles=("<|startofpiece|>", "<|endofpiece|>"),
1160
+ offset=0,
1161
+ sep_style=SeparatorStyle.NO_COLON_TWO,
1162
+ sep="",
1163
+ sep2="</s>",
1164
+ stop_str=["</s>", "<|endoftext|>"],
1165
+ )
1166
+ )
1167
+
1168
+ # Llama2-Chinese default template
1169
+ # source: https://huggingface.co/FlagAlpha
1170
+ register_conv_template(
1171
+ Conversation(
1172
+ name="llama2-chinese",
1173
+ system_template="<s>{system_message}</s>",
1174
+ roles=("Human", "Assistant", "System"),
1175
+ sep_style=SeparatorStyle.ADD_COLON_TWO,
1176
+ sep="\n",
1177
+ sep2="\n</s><s>",
1178
+ stop_str="</s>",
1179
+ )
1180
+ )
1181
+
1182
+ # Vigogne Instruct default template
1183
+ # source: https://github.com/bofenghuang/vigogne
1184
+ register_conv_template(
1185
+ Conversation(
1186
+ name="vigogne_instruct",
1187
+ system_template="### System:\n{system_message}\n\n",
1188
+ system_message=(
1189
+ "Ci-dessous se trouve une instruction qui décrit une tâche à accomplir. Rédigez une réponse qui répond de manière"
1190
+ " précise à la demande."
1191
+ ),
1192
+ roles=("### Instruction", "### Response"),
1193
+ sep_style=SeparatorStyle.DOLLY,
1194
+ sep="\n\n",
1195
+ sep2="</s>",
1196
+ )
1197
+ )
1198
+
1199
+ # Vigogne Chat default template
1200
+ register_conv_template(
1201
+ Conversation(
1202
+ name="vigogne_chat_v2",
1203
+ system_template="<|system|>: {system_message}",
1204
+ system_message=(
1205
+ "Vous êtes Vigogne, un assistant IA créé par Zaion Lab. Vous suivez extrêmement bien les instructions. Aidez"
1206
+ " autant que vous le pouvez."
1207
+ ),
1208
+ roles=("<|user|>", "<|assistant|>"),
1209
+ sep_style=SeparatorStyle.ADD_COLON_TWO,
1210
+ sep="\n",
1211
+ sep2="</s>\n",
1212
+ stop_str="<|user|>",
1213
+ )
1214
+ )
1215
+
1216
+ # Stable Vicuna default template
1217
+ # source: https://huggingface.co/TheBloke/stable-vicuna-13B-HF/discussions/5
1218
+ # source: https://huggingface.co/spaces/CarperAI/StableVicuna/blob/main/app.py
1219
+ register_conv_template(
1220
+ Conversation(
1221
+ name="stable-vicuna",
1222
+ system_message="### Assistant: I am StableVicuna, a large language model created by CarperAI. I am here to chat!\n",
1223
+ roles=("### Human", "### Assistant"),
1224
+ sep_style=SeparatorStyle.ADD_COLON_TWO,
1225
+ sep="\n",
1226
+ sep2="\n\n",
1227
+ )
1228
+ )
1229
+
1230
+ register_conv_template(
1231
+ Conversation(
1232
+ name="vigogne_chat_v3",
1233
+ system_template="[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n",
1234
+ system_message=(
1235
+ "Vous êtes Vigogne, un assistant IA créé par Zaion Lab. Vous suivez extrêmement bien les instructions. Aidez"
1236
+ " autant que vous le pouvez."
1237
+ ),
1238
+ roles=("[INST]", "[/INST]"),
1239
+ sep_style=SeparatorStyle.LLAMA2,
1240
+ sep=" ",
1241
+ sep2=" </s>",
1242
+ )
1243
+ )
1244
+
1245
+ # Falcon 180B chat template
1246
+ # source: https://huggingface.co/spaces/tiiuae/falcon-180b-demo/blob/d1590ee7fae9b6ce331ba7808e61a29dcce9239f/app.py#L28-L37
1247
+ register_conv_template(
1248
+ Conversation(
1249
+ name="falcon-chat",
1250
+ roles=("User", "Falcon"),
1251
+ system_template="System: {system_message}",
1252
+ messages=[],
1253
+ sep_style=SeparatorStyle.FALCON_CHAT,
1254
+ sep="\n",
1255
+ sep2="<|endoftext|>",
1256
+ stop_str="\nUser:", # use stop_str in addition to stop_token_ids to stop generation; the stop_str is also removed from the generated text
1257
+ )
1258
+ )
1259
+
1260
+ # Phind template
1261
+ # source: https://huggingface.co/Phind/Phind-CodeLlama-34B-v2
1262
+ register_conv_template(
1263
+ Conversation(
1264
+ name="phind",
1265
+ system_message="### System Prompt\nYou are an intelligent programming assistant.",
1266
+ roles=("### User Message", "### Assistant"),
1267
+ messages=(),
1268
+ offset=0,
1269
+ sep_style=SeparatorStyle.ADD_COLON_SINGLE,
1270
+ sep="\n\n",
1271
+ )
1272
+ )
1273
+
1274
+ # Metharme formatting for Pygmalion models
1275
+ # source: https://huggingface.co/PygmalionAI/pygmalion-2-13b
1276
+ register_conv_template(
1277
+ Conversation(
1278
+ name="metharme",
1279
+ system_template="<|system|>{system_message}",
1280
+ system_message="""Enter RP mode. You shall reply to the user while staying
1281
+ in character. Your responses must be detailed, creative, immersive, and drive the scenario
1282
+ forward.""",
1283
+ roles=("<|user|>", "<|model|>"),
1284
+ sep_style=SeparatorStyle.NO_COLON_SINGLE,
1285
+ sep="",
1286
+ stop_str="<|user|>",
1287
+ )
1288
+ )
1289
+ # xDAN default template
1290
+ # source: https://huggingface.co/xDAN-AI/xDAN-L1-Chat-v0.1
1291
+ register_conv_template(
1292
+ Conversation(
1293
+ name="xdan-v1",
1294
+ system_message="You are a helpful and harmless assistant named xDAN and created by xDAN-AI.Please response and work on questions thinking step by step.",
1295
+ roles=("### Human", "### Assistant"),
1296
+ sep_style=SeparatorStyle.NO_COLON_SINGLE,
1297
+ sep="\n",
1298
+ stop_str="</s>",
1299
+ )
1300
+ )
1301
+
1302
+ # Zephyr template
1303
+ # reference: https://huggingface.co/spaces/HuggingFaceH4/zephyr-playground/blob/main/dialogues.py
1304
+ register_conv_template(
1305
+ Conversation(
1306
+ name="zephyr",
1307
+ system_template="<|system|>\n{system_message}",
1308
+ roles=("<|user|>", "<|assistant|>"),
1309
+ sep_style=SeparatorStyle.CHATML,
1310
+ sep="</s>",
1311
+ stop_token_ids=[2],
1312
+ stop_str="</s>",
1313
+ )
1314
+ )
1315
+
1316
+ # Orca-2 template
1317
+ # reference: https://huggingface.co/microsoft/Orca-2-7b
1318
+ register_conv_template(
1319
+ Conversation(
1320
+ name="orca-2",
1321
+ system_template="<|im_start|>system\n{system_message}",
1322
+ system_message="You are Orca, an AI language model created by Microsoft. You are a cautious assistant. You carefully follow instructions. You are helpful and harmless and you follow ethical guidelines and promote positive behavior.",
1323
+ roles=("<|im_start|>user", "<|im_start|>assistant"),
1324
+ sep_style=SeparatorStyle.CHATML,
1325
+ sep="<|im_end|>",
1326
+ stop_str="<|im_end|>",
1327
+ )
1328
+ )
1329
+
1330
+ # Deepseek-chat template
1331
+ # reference: https://huggingface.co/deepseek-ai/deepseek-llm-67b-chat/blob/main/tokenizer_config.json
1332
+ register_conv_template(
1333
+ Conversation(
1334
+ name="deepseek-chat",
1335
+ system_message="<|begin▁of▁sentence|>", # must add a bos token before first message
1336
+ roles=("User", "Assistant"),
1337
+ sep_style=SeparatorStyle.DEEPSEEK_CHAT,
1338
+ sep="\n\n",
1339
+ sep2="<|end▁of▁sentence|>",
1340
+ stop_str="<|end▁of▁sentence|>",
1341
+ )
1342
+ )
1343
+
1344
+ # Solar-10.7B Chat Template
1345
+ # Reference: https://huggingface.co/upstage/SOLAR-10.7B-Instruct-v1.0/blob/main/tokenizer_config.json
1346
+ register_conv_template(
1347
+ Conversation(
1348
+ name="solar",
1349
+ system_message="",
1350
+ roles=("### User", "### Assistant"),
1351
+ sep_style=SeparatorStyle.ADD_NEW_LINE_SINGLE,
1352
+ sep="\n\n",
1353
+ stop_str="</s>",
1354
+ )
1355
+ )
1356
+
1357
+ if __name__ == "__main__":
1358
+ from fastchat.conversation import get_conv_template
1359
+
1360
+ print("-- Vicuna template --")
1361
+ conv = get_conv_template("vicuna_v1.1")
1362
+ conv.append_message(conv.roles[0], "Hello!")
1363
+ conv.append_message(conv.roles[1], "Hi!")
1364
+ conv.append_message(conv.roles[0], "How are you?")
1365
+ conv.append_message(conv.roles[1], None)
1366
+ print(conv.get_prompt())
1367
+
1368
+ print("\n")
1369
+
1370
+ print("-- Llama-2 template --")
1371
+ conv = get_conv_template("llama-2")
1372
+ conv.set_system_message("You are a helpful, respectful and honest assistant.")
1373
+ conv.append_message(conv.roles[0], "Hello!")
1374
+ conv.append_message(conv.roles[1], "Hi!")
1375
+ conv.append_message(conv.roles[0], "How are you?")
1376
+ conv.append_message(conv.roles[1], None)
1377
+ print(conv.get_prompt())
1378
+
1379
+ print("\n")
1380
+
1381
+ print("-- ChatGPT template --")
1382
+ conv = get_conv_template("chatgpt")
1383
+ conv.append_message(conv.roles[0], "Hello!")
1384
+ conv.append_message(conv.roles[1], "Hi!")
1385
+ conv.append_message(conv.roles[0], "How are you?")
1386
+ conv.append_message(conv.roles[1], None)
1387
+ print(conv.to_openai_api_messages())
1388
+
1389
+ print("\n")
1390
+
1391
+ print("-- Claude template --")
1392
+ conv = get_conv_template("claude")
1393
+ conv.append_message(conv.roles[0], "Hello!")
1394
+ conv.append_message(conv.roles[1], "Hi!")
1395
+ conv.append_message(conv.roles[0], "How are you?")
1396
+ conv.append_message(conv.roles[1], None)
1397
+ print(conv.get_prompt())
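
A minimal sketch (not part of the uploaded file) of how a downstream script might use the registry defined above; the template name "my-demo" and its separators are invented for illustration.

```python
# Sketch only: register and use a hypothetical template with the registry above.
from fastchat.conversation import (
    Conversation,
    SeparatorStyle,
    register_conv_template,
    get_conv_template,
)

register_conv_template(
    Conversation(
        name="my-demo",  # hypothetical template name, not shipped with FastChat
        system_message="You are a terse assistant.",
        roles=("USER", "ASSISTANT"),
        sep_style=SeparatorStyle.ADD_COLON_TWO,
        sep=" ",
        sep2="</s>",
    )
)

conv = get_conv_template("my-demo")  # returns a copy, so the registry entry stays untouched
conv.append_message(conv.roles[0], "Summarize FastChat in one sentence.")
conv.append_message(conv.roles[1], None)  # placeholder for the model's reply
print(conv.get_prompt())
```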
fastchat/data/__init__.py ADDED
File without changes
fastchat/data/clean_sharegpt.py ADDED
@@ -0,0 +1,217 @@
1
+ """
2
+ - Convert html to markdown with basic data cleaning.
3
+ - Deduplication.
4
+
5
+ Usage:
6
+ python3 -m fastchat.data.clean_sharegpt --in sharegpt_html.json --out sharegpt_clean.json
7
+ """
8
+ import argparse
9
+ from concurrent.futures import ProcessPoolExecutor
10
+ import json
11
+ import logging
12
+ import re
13
+ from typing import Dict, Union
14
+
15
+ import bs4
16
+ import markdownify # == 0.11.6
17
+ from tqdm import tqdm
18
+
19
+
20
+ div_pattern = re.compile("<div.*?>")
21
+ span_pattern = re.compile("<span.*?>")
22
+ code_lang_pattern = re.compile(
23
+ "```\s*" + "(.*?)" + "(?:Copy code)+" + "(.+?)" + "\s*?```", re.DOTALL
24
+ )
25
+ code_lang_format = "```\g<1>\n\g<2>\n```"
26
+ regenerate_pattern = re.compile("\d+ / \d+")
27
+ copy_chars_pattern = re.compile("Copy\d+ chars / \d+ words")
28
+ copy_code_pattern = re.compile("```(.*?)Copy code\s*```")
29
+
30
+
31
+ def reformat_code(val: str) -> str:
32
+ # Input code format is:
33
+ # ```
34
+ # $<language>Copy code$<exact_code_here>
35
+ #
36
+ # ```
37
+ # This function converts it into the correct markdown format.
38
+ return re.sub(code_lang_pattern, code_lang_format, val)
39
+
40
+
41
+ def html_to_markdown(val: str) -> str:
42
+ # Remove all <div>. This is required to make indentation work in code blocks.
43
+ val = re.sub(div_pattern, "", val)
44
+ # Remove all <span>. This is required to make underscores work in code blocks.
45
+ val = re.sub(span_pattern, "", val)
46
+ # Convert html to markdown
47
+ val = markdownify.markdownify(val).strip()
48
+ # Reformat code
49
+ val = reformat_code(val)
50
+
51
+ # Remove noisy "[number] / [number]" at the beginning
52
+ noise = re.search(regenerate_pattern, val)
53
+ if noise and noise.start() == 0:
54
+ val = val[noise.end() :]
55
+ # Remove noisy "Copy[number] chars / [number] words"
56
+ val = re.sub(copy_chars_pattern, "", val)
57
+ # Remove empty code block ```\nCopy code\n```
58
+ val = re.sub(copy_code_pattern, "", val)
59
+
60
+ # Strip
61
+ val = val.replace("\n\n\n", "\n").strip()
62
+
63
+ return val
64
+
65
+
66
+ def contain_blocked_words(val: str) -> bool:
67
+ blocked_words = ["openai", "chatgpt"]
68
+ for w in blocked_words:
69
+ if w in val.lower():
70
+ return True
71
+ return False
72
+
73
+
74
+ def clean_html_one_sample(sample):
75
+ roles = ["human", "gpt"]
76
+
77
+ if len(sample["conversations"]) <= 1:
78
+ return (sample, 1)
79
+
80
+ # Adjust the offset for cases like https://sharegpt.com/c/VyaZlh4
81
+ if sample["conversations"][0]["from"] != "human":
82
+ sample["conversations"] = sample["conversations"][1:]
83
+ if len(sample["conversations"]) <= 1:
84
+ return (sample, 1)
85
+
86
+ if sample["conversations"][-1]["from"] == "human":
87
+ sample["conversations"] = sample["conversations"][:-1]
88
+ if len(sample["conversations"]) <= 1:
89
+ return (sample, 1)
90
+
91
+ char_count = 0
92
+ new_conversations = []
93
+ for i, c in enumerate(sample["conversations"]):
94
+ if c["from"] != roles[i % 2]:
95
+ return (sample, 2)
96
+
97
+ if contain_blocked_words(c["value"]):
98
+ return (sample, 3)
99
+
100
+ try:
101
+ new_val = html_to_markdown(c["value"])
102
+ except (bs4.builder.ParserRejectedMarkup, AssertionError):
103
+ return (sample, 4)
104
+
105
+ # Filter empty answers like https://sharegpt.com/c/mrllZ6u
106
+ if not new_val or not new_val[0].isprintable():
107
+ break
108
+
109
+ char_count += len(new_val)
110
+ new_conversations.append(
111
+ {
112
+ "from": c["from"],
113
+ "value": new_val,
114
+ }
115
+ )
116
+
117
+ new_conversations = new_conversations[: len(new_conversations) // 2 * 2]
118
+ sample["conversations"] = new_conversations
119
+
120
+ if char_count < 16 or len(sample["conversations"]) <= 0:
121
+ return (sample, 1)
122
+
123
+ return (sample, 0)
124
+
125
+
126
+ def clean_html_all(content, begin, end):
127
+ """
128
+ Clean the source html files.
129
+ """
130
+ cnt_skip = 0
131
+ cnt_blocked_words = 0
132
+ cnt_wrong_format = 0
133
+ cnt_parser_error = 0
134
+ cnt_too_short = 0
135
+ cnt_id_duplication = 0
136
+ cnt_value_duplication = 0
137
+ cnt_plugin = 0
138
+ cnt_tag = 0
139
+
140
+ content = content[begin:end]
141
+ processed = []
142
+ with ProcessPoolExecutor() as executor:
143
+ for result in tqdm(
144
+ executor.map(clean_html_one_sample, content), total=len(content)
145
+ ):
146
+ processed.append(result)
147
+
148
+ visited = {}
149
+ new_content = []
150
+ for sample, error_code in processed:
151
+ cid = sample["id"]
152
+ skipped = True
153
+
154
+ if error_code != 0:
155
+ if error_code == 1:
156
+ print(f"id {cid} is too short")
157
+ cnt_too_short += 1
158
+ elif error_code == 2:
159
+ print(f"id {cid} has a wrong format")
160
+ cnt_wrong_format += 1
161
+ elif error_code == 3:
162
+ print(f"id {cid} contains blocked words")
163
+ cnt_blocked_words += 1
164
+ elif error_code == 4:
165
+ print(f"id {cid} contains parser errors")
166
+ cnt_parser_error += 1
167
+ else:
168
+ raise ValueError(f"Invalid error_code: {error_code}")
169
+ elif cid in visited:
170
+ print(f"id {cid} is an id duplication of {visited[cid]}")
171
+ cnt_id_duplication += 1
172
+ elif sample.get("plugins", None) is not None:
173
+ print(f"id {cid} contains plugin")
174
+ cnt_plugin += 1
175
+ else:
176
+ key = (
177
+ sample["conversations"][0]["value"],
178
+ sample["conversations"][1]["value"],
179
+ )
180
+ if key in visited:
181
+ print(f"id {cid} is a value duplication of {visited[key]}")
182
+ cnt_value_duplication += 1
183
+ else:
184
+ visited[cid] = visited[key] = cid
185
+ skipped = False
186
+
187
+ if not skipped:
188
+ new_content.append(sample)
189
+ else:
190
+ cnt_skip += 1
191
+
192
+ print(
193
+ f"total: {len(content)}, skip: {cnt_skip}, new: {len(new_content)}, "
194
+ f"cnt_blocked_words: {cnt_blocked_words}, cnt_parser_error: {cnt_parser_error}, "
195
+ f"cnt_wrong_format: {cnt_wrong_format}, "
196
+ f"cnt_too_short: {cnt_too_short}, cnt_id_duplication: {cnt_id_duplication}, "
197
+ f"cnt_value_duplication: {cnt_value_duplication}, cnt_plugin: {cnt_plugin}"
198
+ )
199
+
200
+ return new_content
201
+
202
+
203
+ def main(args):
204
+ content = json.load(open(args["in_file"], "r"))
205
+ content = clean_html_all(content, args["begin"], args["end"])
206
+ json.dump(content, open(args["out_file"], "w"), indent=2, ensure_ascii=False)
207
+
208
+
209
+ if __name__ == "__main__":
210
+ parser = argparse.ArgumentParser()
211
+ parser.add_argument("--in-file", type=str, required=True)
212
+ parser.add_argument("--out-file", type=str, default="sharegpt_clean.json")
213
+ parser.add_argument("--begin", type=int)
214
+ parser.add_argument("--end", type=int)
215
+ parser.add_argument("--debug", action="store_true")
216
+ args = parser.parse_args()
217
+ main(vars(args))
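
A minimal sketch (not part of the uploaded script) of exercising `html_to_markdown` on an invented ShareGPT-style HTML fragment; the exact markdown produced depends on the installed `markdownify` version (the module comments pin 0.11.6).

```python
# Sketch only: feed an invented HTML fragment through the cleaning helper above
# and inspect the markdown it produces.
from fastchat.data.clean_sharegpt import html_to_markdown

html = (
    "<div><p>Here is a loop:</p>"
    "<pre>pythonCopy code<code>for i in range(3):\n    print(i)</code></pre></div>"
)
print(html_to_markdown(html))
```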
fastchat/data/convert_alpaca.py ADDED
@@ -0,0 +1,38 @@
1
+ """
2
+ Convert alpaca dataset into sharegpt format.
3
+
4
+ Usage: python3 -m fastchat.data.convert_alpaca --in alpaca_data.json
5
+ """
6
+
7
+ import argparse
8
+ import json
9
+
10
+ from transformers import AutoTokenizer, AutoModelForCausalLM
11
+ import numpy as np
12
+
13
+
14
+ if __name__ == "__main__":
15
+ parser = argparse.ArgumentParser()
16
+ parser.add_argument("--in-file", type=str)
17
+ parser.add_argument("--out-file", type=str)
18
+ args = parser.parse_args()
19
+
20
+ content = json.load(open(args.in_file, "r"))
21
+ new_content = []
22
+ for i, c in enumerate(content):
23
+ if len(c["input"].strip()) > 1:
24
+ q, a = c["instruction"] + "\nInput:\n" + c["input"], c["output"]
25
+ else:
26
+ q, a = c["instruction"], c["output"]
27
+ new_content.append(
28
+ {
29
+ "id": f"alpaca_{i}",
30
+ "conversations": [
31
+ {"from": "human", "value": q},
32
+ {"from": "gpt", "value": a},
33
+ ],
34
+ }
35
+ )
36
+
37
+ print(f"#out: {len(new_content)}")
38
+ json.dump(new_content, open(args.out_file, "w"), indent=2, ensure_ascii=False)
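
A minimal sketch (not part of the uploaded script) of the per-record rule applied above, shown on one invented Alpaca record.

```python
# Sketch only: convert a single invented Alpaca record to ShareGPT format.
c = {
    "instruction": "Translate the sentence to French.",
    "input": "Good morning",
    "output": "Bonjour",
}

# Non-trivial "input" fields are folded into the question, mirroring the loop above.
if len(c["input"].strip()) > 1:
    q, a = c["instruction"] + "\nInput:\n" + c["input"], c["output"]
else:
    q, a = c["instruction"], c["output"]

record = {
    "id": "alpaca_0",
    "conversations": [
        {"from": "human", "value": q},
        {"from": "gpt", "value": a},
    ],
}
print(record)
```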
fastchat/data/extract_gpt4_only.py ADDED
@@ -0,0 +1,32 @@
1
+ """
2
+ Extract the conversations generated by GPT-4 only.
3
+
4
+ Usage: python3 -m fastchat.data.extract_gpt4_only --in sharegpt.json
5
+ """
6
+ import argparse
7
+ import json
8
+
9
+
10
+ if __name__ == "__main__":
11
+ parser = argparse.ArgumentParser()
12
+ parser.add_argument("--in-file", type=str, required=True)
13
+ parser.add_argument("--out-file", type=str)
14
+ parser.add_argument("--begin", type=int)
15
+ parser.add_argument("--end", type=int)
16
+ args = parser.parse_args()
17
+
18
+ content = json.load(open(args.in_file, "r"))
19
+ content = content[args.begin : args.end]
20
+ new_content = []
21
+ for c in content:
22
+ model = c.get("model", None)
23
+ if model == "gpt4" or model is None:
24
+ new_content.append(c)
25
+
26
+ if args.out_file:
27
+ out_file = args.out_file
28
+ else:
29
+ out_file = args.in_file.replace(".json", "_gpt4.json")
30
+
31
+ print(f"#in: {len(content)}, #out: {len(new_content)}")
32
+ json.dump(new_content, open(out_file, "w"), indent=2, ensure_ascii=False)
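
A minimal sketch (not part of the uploaded script) of the keep/drop rule above, on invented records; note that entries without a `model` field are kept, mirroring the `model is None` branch in the loop.

```python
# Sketch only: which invented records survive the GPT-4-only filter above.
records = [
    {"id": "a", "model": "gpt4", "conversations": []},     # kept
    {"id": "b", "conversations": []},                       # kept (no "model" field)
    {"id": "c", "model": "gpt-3.5", "conversations": []},   # dropped
]
kept = [r for r in records if r.get("model", None) in ("gpt4", None)]
print([r["id"] for r in kept])  # ['a', 'b']
```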
fastchat/data/extract_single_round.py ADDED
@@ -0,0 +1,29 @@
1
+ """
2
+ Extract the first round of the conversations.
3
+
4
+ Usage: python3 -m fastchat.data.extract_single_round --in sharegpt.json
5
+ """
6
+ import argparse
7
+ import json
8
+
9
+
10
+ if __name__ == "__main__":
11
+ parser = argparse.ArgumentParser()
12
+ parser.add_argument("--in-file", type=str, required=True)
13
+ parser.add_argument("--out-file", type=str)
14
+ parser.add_argument("--begin", type=int)
15
+ parser.add_argument("--end", type=int)
16
+ args = parser.parse_args()
17
+
18
+ content = json.load(open(args.in_file, "r"))
19
+ content = content[args.begin : args.end]
20
+ for c in content:
21
+ c["conversations"] = c["conversations"][:2]
22
+
23
+ if args.out_file:
24
+ out_file = args.out_file
25
+ else:
26
+ out_file = args.in_file.replace(".json", "_single.json")
27
+
28
+ print(f"#in: {len(content)}, #out: {len(content)}")
29
+ json.dump(content, open(out_file, "w"), indent=2, ensure_ascii=False)
fastchat/data/filter_wrong_format.py ADDED
@@ -0,0 +1,44 @@
1
+ """
2
+ Filter conversations with wrong formats.
3
+
4
+ Usage:
5
+ python3 -m fastchat.data.filter_wrong_format --in input.json --out output.json
6
+
7
+ """
8
+ import argparse
9
+ import json
10
+ import re
11
+
12
+ from tqdm import tqdm
13
+
14
+ wrong_indices_pattern = re.compile("\n1\. [^2]*\n1\. ")
15
+
16
+
17
+ def should_skip(conv):
18
+ # Filter wrong list indices like https://sharegpt.com/c/1pREAGO
19
+ for sentence in conv["conversations"]:
20
+ val = sentence["value"]
21
+ sub = re.search(wrong_indices_pattern, val)
22
+ if sub is not None:
23
+ return True
24
+
25
+ return False
26
+
27
+
28
+ if __name__ == "__main__":
29
+ parser = argparse.ArgumentParser()
30
+ parser.add_argument("--in-file", type=str, required=True)
31
+ parser.add_argument("--out-file", type=str, required=True)
32
+ args = parser.parse_args()
33
+
34
+ content = json.load(open(args.in_file, "r"))
35
+
36
+ new_content = []
37
+ for conv in tqdm(content):
38
+ if should_skip(conv):
39
+ print(f"{conv['id']} contains a wrong format.")
40
+ else:
41
+ new_content.append(conv)
42
+
43
+ print(f"#in: {len(content)}, #out: {len(new_content)}")
44
+ json.dump(new_content, open(args.out_file, "w"), indent=2, ensure_ascii=False)
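
A minimal sketch (not part of the uploaded script) showing what `wrong_indices_pattern` is meant to catch: numbered markdown lists whose indices restart at "1." instead of continuing. The sample strings are invented.

```python
# Sketch only: demonstrate the list-numbering filter used above.
import re

wrong_indices_pattern = re.compile(r"\n1\. [^2]*\n1\. ")

bad = "Steps:\n1. Install it\n1. Run it"    # numbering restarts -> would be filtered
good = "Steps:\n1. Install it\n2. Run it"   # normal numbering -> kept
print(bool(wrong_indices_pattern.search(bad)))   # True
print(bool(wrong_indices_pattern.search(good)))  # False
```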
fastchat/data/get_stats.py ADDED
@@ -0,0 +1,82 @@
1
+ """
2
+ Get stats of a dataset.
3
+
4
+ Usage: python3 -m fastchat.data.get_stats --in sharegpt.json
5
+ """
6
+
7
+ import argparse
8
+ from concurrent.futures import ProcessPoolExecutor
9
+ import json
10
+
11
+ import numpy as np
12
+ from tqdm import tqdm
13
+ from transformers import AutoTokenizer, AutoModelForCausalLM
14
+
15
+ K = 1e3
16
+ M = 1e6
17
+
18
+
19
+ def tokenize_one_sample(c):
20
+ for i in range(len(c["conversations"])):
21
+ v = c["conversations"][i]["value"]
22
+ c["conversations"][i]["value"] = tokenizer.tokenize(v)
23
+ return c
24
+
25
+
26
+ def tokenize_dataset(content):
27
+ processed = []
28
+ with ProcessPoolExecutor() as executor:
29
+ for result in tqdm(
30
+ executor.map(tokenize_one_sample, content), total=len(content)
31
+ ):
32
+ processed.append(result)
33
+
34
+ return processed
35
+
36
+
37
+ def compute_stats(content):
38
+ sample_lens = []
39
+ sample_turns = []
40
+ prompt_lens = []
41
+ res_lens = []
42
+
43
+ for c in content:
44
+ sample_len = 0
45
+ sample_turns.append(len(c["conversations"]) // 2)
46
+ for i in range(len(c["conversations"]) // 2):
47
+ p = c["conversations"][i * 2]["value"]
48
+ r = c["conversations"][i * 2 + 1]["value"]
49
+
50
+ turn_len = len(p) + len(r)
51
+ sample_len += turn_len
52
+ prompt_lens.append(len(p))
53
+ res_lens.append(len(r))
54
+ sample_lens.append(sample_len)
55
+
56
+ return sample_lens, sample_turns, prompt_lens, res_lens
57
+
58
+
59
+ if __name__ == "__main__":
60
+ parser = argparse.ArgumentParser()
61
+ parser.add_argument("--in-file", type=str)
62
+ parser.add_argument(
63
+ "--model-name-or-path", type=str, default="meta-llama/Llama-2-7b-chat-hf"
64
+ )
65
+ args = parser.parse_args()
66
+
67
+ content = json.load(open(args.in_file, "r"))
68
+ tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=False)
69
+ content = tokenize_dataset(content)
70
+
71
+ sample_lens, sample_turns, prompt_lens, res_lens = compute_stats(content)
72
+ print(f"#sequence: {len(content)/K:.2f} K")
73
+ print(f"#tokens: {np.sum(sample_lens)/M:.2f} M")
74
+ print(f"avg. turns: {np.mean(sample_turns):.2f}")
75
+ print(f"avg. prompt length: {np.mean(prompt_lens):.2f}")
76
+ print(f"avg. response length: {np.mean(res_lens):.2f}")
77
+
78
+ print("\n- Histogram -")
79
+ bin_edges = [0, 1024, 2048, 4096, 8192, 16384, 32768]
80
+ hist = np.histogram(sample_lens, bins=bin_edges)[0]
81
+ for i in range(len(hist)):
82
+ print(f"L{bin_edges[i]} - {bin_edges[i+1]}: {hist[i]}")
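
A minimal sketch (not part of the uploaded script) of `compute_stats` on a tiny in-memory dataset; lengths here are character counts because the sample is not run through `tokenize_dataset` first, and the dataset itself is invented.

```python
# Sketch only: run the stats helper above on one invented conversation.
from fastchat.data.get_stats import compute_stats

dataset = [
    {
        "conversations": [
            {"from": "human", "value": "Hi"},
            {"from": "gpt", "value": "Hello! How can I help?"},
        ]
    }
]
sample_lens, sample_turns, prompt_lens, res_lens = compute_stats(dataset)
print(sample_lens, sample_turns, prompt_lens, res_lens)
# -> [24] [1] [2] [22]
```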