Ywung committed
Commit
eec676d
1 Parent(s): 0db0069

add webui code

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. Dockerfile +84 -0
  2. LICENSE +661 -0
  3. api-examples/api-example-chat-stream.py +110 -0
  4. api-examples/api-example-chat.py +90 -0
  5. api-examples/api-example-model.py +176 -0
  6. api-examples/api-example-stream.py +84 -0
  7. api-examples/api-example.py +61 -0
  8. characters/Example.png +0 -0
  9. characters/Example.yaml +17 -0
  10. convert-to-safetensors.py +38 -0
  11. css/chat_style-TheEncrypted777.css +136 -0
  12. css/chat_style-cai-chat.css +58 -0
  13. css/chat_style-messenger.css +99 -0
  14. css/chat_style-wpp.css +55 -0
  15. css/html_4chan_style.css +104 -0
  16. css/html_instruct_style.css +66 -0
  17. css/html_readable_style.css +33 -0
  18. css/main.css +470 -0
  19. docker-compose.yml +33 -0
  20. docker/.dockerignore +9 -0
  21. docker/.env.example +30 -0
  22. docker/Dockerfile +72 -0
  23. docker/docker-compose.yml +33 -0
  24. docs/Audio-Notification.md +14 -0
  25. docs/Chat-mode.md +39 -0
  26. docs/DeepSpeed.md +24 -0
  27. docs/Docker.md +203 -0
  28. docs/ExLlama.md +22 -0
  29. docs/Extensions.md +244 -0
  30. docs/GPTQ-models-(4-bit-mode).md +187 -0
  31. docs/LLaMA-model.md +56 -0
  32. docs/LLaMA-v2-model.md +35 -0
  33. docs/LoRA.md +71 -0
  34. docs/Low-VRAM-guide.md +53 -0
  35. docs/README.md +21 -0
  36. docs/RWKV-model.md +72 -0
  37. docs/Spell-book.md +107 -0
  38. docs/System-requirements.md +42 -0
  39. docs/Training-LoRAs.md +174 -0
  40. docs/WSL-installation-guide.md +82 -0
  41. docs/Windows-installation-guide.md +9 -0
  42. docs/llama.cpp.md +45 -0
  43. download-model.py +275 -0
  44. extensions/api/blocking_api.py +224 -0
  45. extensions/api/requirements.txt +2 -0
  46. extensions/api/script.py +13 -0
  47. extensions/api/streaming_api.py +124 -0
  48. extensions/api/util.py +148 -0
  49. extensions/character_bias/script.py +83 -0
  50. extensions/elevenlabs_tts/outputs/outputs-will-be-saved-here.txt +0 -0
Dockerfile ADDED
@@ -0,0 +1,84 @@
FROM nvidia/cuda:11.8.0-devel-ubuntu22.04 as builder

RUN apt-get update && \
    apt-get install --no-install-recommends -y git vim build-essential python3-dev python3-venv && \
    rm -rf /var/lib/apt/lists/*

RUN git clone https://github.com/oobabooga/GPTQ-for-LLaMa /build

WORKDIR /build

RUN python3 -m venv /build/venv
RUN . /build/venv/bin/activate && \
    pip3 install --upgrade pip setuptools wheel && \
    pip3 install torch torchvision torchaudio && \
    pip3 install -r requirements.txt

# https://developer.nvidia.com/cuda-gpus
# for a rtx 2060: ARG TORCH_CUDA_ARCH_LIST="7.5"
ARG TORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST:-3.5;5.0;6.0;6.1;7.0;7.5;8.0;8.6+PTX}"
RUN . /build/venv/bin/activate && \
    python3 setup_cuda.py bdist_wheel -d .

FROM nvidia/cuda:11.8.0-runtime-ubuntu22.04

LABEL maintainer="Your Name <your.email@example.com>"
LABEL description="Docker image for GPTQ-for-LLaMa and Text Generation WebUI"

RUN apt-get update && \
    apt-get install --no-install-recommends -y python3-dev libportaudio2 libasound-dev git python3 python3-pip make g++ ffmpeg && \
    rm -rf /var/lib/apt/lists/*

RUN --mount=type=cache,target=/root/.cache/pip pip3 install virtualenv

# Set up a new user named "user" with user ID 1000
RUN useradd -m -u 1000 user

# Switch to the "user" user
USER user

# Set home to the user's home directory
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH

# Set the working directory to the user's home directory
RUN mkdir $HOME/app

WORKDIR $HOME/app

ARG WEBUI_VERSION
RUN test -n "${WEBUI_VERSION}" && git reset --hard ${WEBUI_VERSION} || echo "Using provided webui source"

RUN virtualenv $HOME/app/venv
RUN . $HOME/app/venv/bin/activate && \
    pip3 install --upgrade pip setuptools wheel && \
    pip3 install torch torchvision torchaudio

COPY --chown=user --from=builder /build $HOME/app/repositories/GPTQ-for-LLaMa
RUN . $HOME/app/venv/bin/activate && \
    pip3 install $HOME/app/repositories/GPTQ-for-LLaMa/*.whl

COPY --chown=user extensions/api/requirements.txt $HOME/app/extensions/api/requirements.txt
COPY --chown=user extensions/elevenlabs_tts/requirements.txt $HOME/app/extensions/elevenlabs_tts/requirements.txt
COPY --chown=user extensions/google_translate/requirements.txt $HOME/app/extensions/google_translate/requirements.txt
COPY --chown=user extensions/silero_tts/requirements.txt $HOME/app/extensions/silero_tts/requirements.txt
COPY --chown=user extensions/whisper_stt/requirements.txt $HOME/app/extensions/whisper_stt/requirements.txt
COPY --chown=user extensions/superbooga/requirements.txt $HOME/app/extensions/superbooga/requirements.txt
COPY --chown=user extensions/openai/requirements.txt $HOME/app/extensions/openai/requirements.txt
RUN --mount=type=cache,target=/root/.cache/pip . $HOME/app/venv/bin/activate && cd extensions/api && pip3 install -r requirements.txt
RUN --mount=type=cache,target=/root/.cache/pip . $HOME/app/venv/bin/activate && cd extensions/elevenlabs_tts && pip3 install -r requirements.txt
RUN --mount=type=cache,target=/root/.cache/pip . $HOME/app/venv/bin/activate && cd extensions/google_translate && pip3 install -r requirements.txt
RUN --mount=type=cache,target=/root/.cache/pip . $HOME/app/venv/bin/activate && cd extensions/silero_tts && pip3 install -r requirements.txt
RUN --mount=type=cache,target=/root/.cache/pip . $HOME/app/venv/bin/activate && cd extensions/whisper_stt && pip3 install -r requirements.txt
RUN --mount=type=cache,target=/root/.cache/pip . $HOME/app/venv/bin/activate && cd extensions/superbooga && pip3 install -r requirements.txt
RUN --mount=type=cache,target=/root/.cache/pip . $HOME/app/venv/bin/activate && cd extensions/openai && pip3 install -r requirements.txt

COPY --chown=user requirements.txt $HOME/app/requirements.txt
RUN . $HOME/app/venv/bin/activate && \
    pip3 install -r requirements.txt

RUN cp $HOME/app/venv/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda118.so $HOME/app/venv/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cpu.so

COPY --chown=user . $HOME/app/
ENV CLI_ARGS="--listen-port 7860"
CMD . $HOME/app/venv/bin/activate && python3 server.py ${CLI_ARGS}
LICENSE ADDED
@@ -0,0 +1,661 @@
GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007

Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.

[The remainder of this file is the standard, unmodified GNU AGPL-3.0 text: <https://www.gnu.org/licenses/agpl-3.0.txt>]
api-examples/api-example-chat-stream.py ADDED
@@ -0,0 +1,110 @@
import asyncio
import html
import json
import sys

try:
    import websockets
except ImportError:
    print("Websockets package not found. Make sure it's installed.")

# For local streaming, the websockets are hosted without ssl - ws://
HOST = 'localhost:5005'
URI = f'ws://{HOST}/api/v1/chat-stream'

# For reverse-proxied streaming, the remote will likely host with ssl - wss://
# URI = 'wss://your-uri-here.trycloudflare.com/api/v1/stream'


async def run(user_input, history):
    # Note: the selected defaults change from time to time.
    request = {
        'user_input': user_input,
        'max_new_tokens': 250,
        'auto_max_new_tokens': False,
        'max_tokens_second': 0,
        'history': history,
        'mode': 'instruct',  # Valid options: 'chat', 'chat-instruct', 'instruct'
        'character': 'Example',
        'instruction_template': 'Vicuna-v1.1',  # Will get autodetected if unset
        'your_name': 'You',
        # 'name1': 'name of user', # Optional
        # 'name2': 'name of character', # Optional
        # 'context': 'character context', # Optional
        # 'greeting': 'greeting', # Optional
        # 'name1_instruct': 'You', # Optional
        # 'name2_instruct': 'Assistant', # Optional
        # 'context_instruct': 'context_instruct', # Optional
        # 'turn_template': 'turn_template', # Optional
        'regenerate': False,
        '_continue': False,
        'chat_instruct_command': 'Continue the chat dialogue below. Write a single reply for the character "<|character|>".\n\n<|prompt|>',

        # Generation params. If 'preset' is set to different than 'None', the values
        # in presets/preset-name.yaml are used instead of the individual numbers.
        'preset': 'None',
        'do_sample': True,
        'temperature': 0.7,
        'top_p': 0.1,
        'typical_p': 1,
        'epsilon_cutoff': 0,  # In units of 1e-4
        'eta_cutoff': 0,  # In units of 1e-4
        'tfs': 1,
        'top_a': 0,
        'repetition_penalty': 1.18,
        'repetition_penalty_range': 0,
        'top_k': 40,
        'min_length': 0,
        'no_repeat_ngram_size': 0,
        'num_beams': 1,
        'penalty_alpha': 0,
        'length_penalty': 1,
        'early_stopping': False,
        'mirostat_mode': 0,
        'mirostat_tau': 5,
        'mirostat_eta': 0.1,
        'guidance_scale': 1,
        'negative_prompt': '',

        'seed': -1,
        'add_bos_token': True,
        'truncation_length': 2048,
        'ban_eos_token': False,
        'skip_special_tokens': True,
        'stopping_strings': []
    }

    async with websockets.connect(URI, ping_interval=None) as websocket:
        await websocket.send(json.dumps(request))

        while True:
            incoming_data = await websocket.recv()
            incoming_data = json.loads(incoming_data)

            match incoming_data['event']:
                case 'text_stream':
                    yield incoming_data['history']
                case 'stream_end':
                    return


async def print_response_stream(user_input, history):
    cur_len = 0
    async for new_history in run(user_input, history):
        cur_message = new_history['visible'][-1][1][cur_len:]
        cur_len += len(cur_message)
        print(html.unescape(cur_message), end='')
        sys.stdout.flush()  # If we don't flush, we won't see tokens in realtime.


if __name__ == '__main__':
    user_input = "Please give me a step-by-step guide on how to plant a tree in my backyard."

    # Basic example
    history = {'internal': [], 'visible': []}

    # "Continue" example. Make sure to set '_continue' to True above
    # arr = [user_input, 'Surely, here is']
    # history = {'internal': [arr], 'visible': [arr]}

    asyncio.run(print_response_stream(user_input, history))
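The script above prints tokens as they stream in; the same generator also supports multi-turn use if the history returned by the server is fed back into the next request. A minimal sketch of that pattern, assuming it is appended to the script above (so run() and the html import are in scope); the prompts and helper name are illustrative and not part of the commit:

# Sketch: a short two-turn exchange that reuses run() from above and keeps the
# last history snapshot from each stream as the input to the next turn.
async def chat_twice():
    history = {'internal': [], 'visible': []}
    for prompt in ["What tools do I need to plant a tree?", "How deep should the hole be?"]:
        async for new_history in run(prompt, history):
            history = new_history  # keep only the most recent snapshot
        print(f"{prompt}\n-> {html.unescape(history['visible'][-1][1])}\n")

# asyncio.run(chat_twice())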
api-examples/api-example-chat.py ADDED
@@ -0,0 +1,90 @@
import html
import json

import requests

# For local use, the API is hosted without ssl - http://
HOST = 'localhost:5000'
URI = f'http://{HOST}/api/v1/chat'

# For reverse-proxied access, the remote will likely host with ssl - https://
# URI = 'https://your-uri-here.trycloudflare.com/api/v1/chat'


def run(user_input, history):
    request = {
        'user_input': user_input,
        'max_new_tokens': 250,
        'auto_max_new_tokens': False,
        'max_tokens_second': 0,
        'history': history,
        'mode': 'instruct',  # Valid options: 'chat', 'chat-instruct', 'instruct'
        'character': 'Example',
        'instruction_template': 'Vicuna-v1.1',  # Will get autodetected if unset
        'your_name': 'You',
        # 'name1': 'name of user', # Optional
        # 'name2': 'name of character', # Optional
        # 'context': 'character context', # Optional
        # 'greeting': 'greeting', # Optional
        # 'name1_instruct': 'You', # Optional
        # 'name2_instruct': 'Assistant', # Optional
        # 'context_instruct': 'context_instruct', # Optional
        # 'turn_template': 'turn_template', # Optional
        'regenerate': False,
        '_continue': False,
        'chat_instruct_command': 'Continue the chat dialogue below. Write a single reply for the character "<|character|>".\n\n<|prompt|>',

        # Generation params. If 'preset' is set to different than 'None', the values
        # in presets/preset-name.yaml are used instead of the individual numbers.
        'preset': 'None',
        'do_sample': True,
        'temperature': 0.7,
        'top_p': 0.1,
        'typical_p': 1,
        'epsilon_cutoff': 0,  # In units of 1e-4
        'eta_cutoff': 0,  # In units of 1e-4
        'tfs': 1,
        'top_a': 0,
        'repetition_penalty': 1.18,
        'repetition_penalty_range': 0,
        'top_k': 40,
        'min_length': 0,
        'no_repeat_ngram_size': 0,
        'num_beams': 1,
        'penalty_alpha': 0,
        'length_penalty': 1,
        'early_stopping': False,
        'mirostat_mode': 0,
        'mirostat_tau': 5,
        'mirostat_eta': 0.1,
        'guidance_scale': 1,
        'negative_prompt': '',

        'seed': -1,
        'add_bos_token': True,
        'truncation_length': 2048,
        'ban_eos_token': False,
        'skip_special_tokens': True,
        'stopping_strings': []
    }

    response = requests.post(URI, json=request)

    if response.status_code == 200:
        result = response.json()['results'][0]['history']
        print(json.dumps(result, indent=4))
        print()
        print(html.unescape(result['visible'][-1][1]))


if __name__ == '__main__':
    user_input = "Please give me a step-by-step guide on how to plant a tree in my backyard."

    # Basic example
    history = {'internal': [], 'visible': []}

    # "Continue" example. Make sure to set '_continue' to True above
    # arr = [user_input, 'Surely, here is']
    # history = {'internal': [arr], 'visible': [arr]}

    run(user_input, history)
api-examples/api-example-model.py ADDED
@@ -0,0 +1,176 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+
3
+ import requests
4
+
5
+ HOST = '0.0.0.0:5000'
6
+
7
+
8
+ def generate(prompt, tokens=200):
9
+ request = {'prompt': prompt, 'max_new_tokens': tokens}
10
+ response = requests.post(f'http://{HOST}/api/v1/generate', json=request)
11
+
12
+ if response.status_code == 200:
13
+ return response.json()['results'][0]['text']
14
+
15
+
16
+ def model_api(request):
17
+ response = requests.post(f'http://{HOST}/api/v1/model', json=request)
18
+ return response.json()
19
+
20
+
21
+ # print some common settings
22
+ def print_basic_model_info(response):
23
+ basic_settings = ['truncation_length', 'instruction_template']
24
+ print("Model: ", response['result']['model_name'])
25
+ print("Lora(s): ", response['result']['lora_names'])
26
+ for setting in basic_settings:
27
+ print(setting, "=", response['result']['shared.settings'][setting])
28
+
29
+
30
+ # model info
31
+ def model_info():
32
+ response = model_api({'action': 'info'})
33
+ print_basic_model_info(response)
34
+
35
+
36
+ # simple loader
37
+ def model_load(model_name):
38
+ return model_api({'action': 'load', 'model_name': model_name})
39
+
40
+
41
+ # complex loader
42
+ def complex_model_load(model):
43
+
44
+ def guess_groupsize(model_name):
45
+ if '1024g' in model_name:
46
+ return 1024
47
+ elif '128g' in model_name:
48
+ return 128
49
+ elif '32g' in model_name:
50
+ return 32
51
+ else:
52
+ return -1
53
+
54
+ req = {
55
+ 'action': 'load',
56
+ 'model_name': model,
57
+ 'args': {
58
+ 'loader': 'AutoGPTQ',
59
+
60
+ 'bf16': False,
61
+ 'load_in_8bit': False,
62
+ 'groupsize': 0,
63
+ 'wbits': 0,
64
+
65
+ # llama.cpp
66
+ 'threads': 0,
67
+ 'n_batch': 512,
68
+ 'no_mmap': False,
69
+ 'mlock': False,
70
+ 'cache_capacity': None,
71
+ 'n_gpu_layers': 0,
72
+ 'n_ctx': 2048,
73
+
74
+ # RWKV
75
+ 'rwkv_strategy': None,
76
+ 'rwkv_cuda_on': False,
77
+
78
+ # b&b 4-bit
79
+ # 'load_in_4bit': False,
80
+ # 'compute_dtype': 'float16',
81
+ # 'quant_type': 'nf4',
82
+ # 'use_double_quant': False,
83
+
84
+ # "cpu": false,
85
+ # "auto_devices": false,
86
+ # "gpu_memory": null,
87
+ # "cpu_memory": null,
88
+ # "disk": false,
89
+ # "disk_cache_dir": "cache",
90
+ },
91
+ }
92
+
93
+ model = model.lower()
94
+
95
+ if '4bit' in model or 'gptq' in model or 'int4' in model:
96
+ req['args']['wbits'] = 4
97
+ req['args']['groupsize'] = guess_groupsize(model)
98
+ elif '3bit' in model:
99
+ req['args']['wbits'] = 3
100
+ req['args']['groupsize'] = guess_groupsize(model)
101
+ else:
102
+ req['args']['gptq_for_llama'] = False
103
+
104
+ if '8bit' in model:
105
+ req['args']['load_in_8bit'] = True
106
+ elif '-hf' in model or 'fp16' in model:
107
+ if '7b' in model:
108
+ req['args']['bf16'] = True # for 24GB
109
+ elif '13b' in model:
110
+ req['args']['load_in_8bit'] = True # for 24GB
111
+ elif 'ggml' in model:
112
+ # req['args']['threads'] = 16
113
+ if '7b' in model:
114
+ req['args']['n_gpu_layers'] = 100
115
+ elif '13b' in model:
116
+ req['args']['n_gpu_layers'] = 100
117
+ elif '30b' in model or '33b' in model:
118
+ req['args']['n_gpu_layers'] = 59 # 24GB
119
+ elif '65b' in model:
120
+ req['args']['n_gpu_layers'] = 42 # 24GB
121
+ elif 'rwkv' in model:
122
+ req['args']['rwkv_cuda_on'] = True
123
+ if '14b' in model:
124
+ req['args']['rwkv_strategy'] = 'cuda f16i8' # 24GB
125
+ else:
126
+ req['args']['rwkv_strategy'] = 'cuda f16' # 24GB
127
+
128
+ return model_api(req)
129
+
130
+
131
+ if __name__ == '__main__':
132
+ for model in model_api({'action': 'list'})['result']:
133
+ try:
134
+ resp = complex_model_load(model)
135
+
136
+ if 'error' in resp:
137
+ print(f"❌ {model} FAIL Error: {resp['error']['message']}")
138
+ continue
139
+ else:
140
+ print_basic_model_info(resp)
141
+
142
+ ans = generate("0,1,1,2,3,5,8,13,", tokens=2)
143
+
144
+ if '21' in ans:
145
+ print(f"✅ {model} PASS ({ans})")
146
+ else:
147
+ print(f"❌ {model} FAIL ({ans})")
148
+
149
+ except Exception as e:
150
+ print(f"❌ {model} FAIL Exception: {repr(e)}")
151
+
152
+
153
+ # 0,1,1,2,3,5,8,13, is the Fibonacci sequence; the next number is 21.
154
+ # Some results below.
155
+ """ $ ./model-api-example.py
156
+ Model: 4bit_gpt4-x-alpaca-13b-native-4bit-128g-cuda
157
+ Lora(s): []
158
+ truncation_length = 2048
159
+ instruction_template = Alpaca
160
+ ✅ 4bit_gpt4-x-alpaca-13b-native-4bit-128g-cuda PASS (21)
161
+ Model: 4bit_WizardLM-13B-Uncensored-4bit-128g
162
+ Lora(s): []
163
+ truncation_length = 2048
164
+ instruction_template = WizardLM
165
+ ✅ 4bit_WizardLM-13B-Uncensored-4bit-128g PASS (21)
166
+ Model: Aeala_VicUnlocked-alpaca-30b-4bit
167
+ Lora(s): []
168
+ truncation_length = 2048
169
+ instruction_template = Alpaca
170
+ ✅ Aeala_VicUnlocked-alpaca-30b-4bit PASS (21)
171
+ Model: alpaca-30b-4bit
172
+ Lora(s): []
173
+ truncation_length = 2048
174
+ instruction_template = Alpaca
175
+ ✅ alpaca-30b-4bit PASS (21)
176
+ """
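For loading a single, known model instead of sweeping the whole list as `__main__` does above, the helpers in this file can be called directly. A minimal sketch, assuming the blocking API is reachable at `HOST` and that `'my-model'` is a placeholder for a name returned by the `'list'` action:

```python
# Hypothetical single-model usage of the helpers defined above.
# 'my-model' is a placeholder; pick a real name from
# model_api({'action': 'list'})['result'].
resp = model_load('my-model')

if 'error' in resp:
    raise RuntimeError(resp['error']['message'])

print_basic_model_info(resp)   # model name, loras, truncation_length, template
print(generate("0,1,1,2,3,5,8,13,", tokens=2))
```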
api-examples/api-example-stream.py ADDED
@@ -0,0 +1,84 @@
1
+ import asyncio
2
+ import json
3
+ import sys
4
+
5
+ try:
6
+ import websockets
7
+ except ImportError:
8
+ print("Websockets package not found. Make sure it's installed.")
9
+
10
+ # For local streaming, the websockets are hosted without ssl - ws://
11
+ HOST = 'localhost:5005'
12
+ URI = f'ws://{HOST}/api/v1/stream'
13
+
14
+ # For reverse-proxied streaming, the remote will likely host with ssl - wss://
15
+ # URI = 'wss://your-uri-here.trycloudflare.com/api/v1/stream'
16
+
17
+
18
+ async def run(context):
19
+ # Note: the selected defaults change from time to time.
20
+ request = {
21
+ 'prompt': context,
22
+ 'max_new_tokens': 250,
23
+ 'auto_max_new_tokens': False,
24
+ 'max_tokens_second': 0,
25
+
26
+ # Generation params. If 'preset' is set to different than 'None', the values
27
+ # in presets/preset-name.yaml are used instead of the individual numbers.
28
+ 'preset': 'None',
29
+ 'do_sample': True,
30
+ 'temperature': 0.7,
31
+ 'top_p': 0.1,
32
+ 'typical_p': 1,
33
+ 'epsilon_cutoff': 0, # In units of 1e-4
34
+ 'eta_cutoff': 0, # In units of 1e-4
35
+ 'tfs': 1,
36
+ 'top_a': 0,
37
+ 'repetition_penalty': 1.18,
38
+ 'repetition_penalty_range': 0,
39
+ 'top_k': 40,
40
+ 'min_length': 0,
41
+ 'no_repeat_ngram_size': 0,
42
+ 'num_beams': 1,
43
+ 'penalty_alpha': 0,
44
+ 'length_penalty': 1,
45
+ 'early_stopping': False,
46
+ 'mirostat_mode': 0,
47
+ 'mirostat_tau': 5,
48
+ 'mirostat_eta': 0.1,
49
+ 'guidance_scale': 1,
50
+ 'negative_prompt': '',
51
+
52
+ 'seed': -1,
53
+ 'add_bos_token': True,
54
+ 'truncation_length': 2048,
55
+ 'ban_eos_token': False,
56
+ 'skip_special_tokens': True,
57
+ 'stopping_strings': []
58
+ }
59
+
60
+ async with websockets.connect(URI, ping_interval=None) as websocket:
61
+ await websocket.send(json.dumps(request))
62
+
63
+ yield context # Remove this if you just want to see the reply
64
+
65
+ while True:
66
+ incoming_data = await websocket.recv()
67
+ incoming_data = json.loads(incoming_data)
68
+
69
+ match incoming_data['event']:
70
+ case 'text_stream':
71
+ yield incoming_data['text']
72
+ case 'stream_end':
73
+ return
74
+
75
+
76
+ async def print_response_stream(prompt):
77
+ async for response in run(prompt):
78
+ print(response, end='')
79
+ sys.stdout.flush() # If we don't flush, we won't see tokens in realtime.
80
+
81
+
82
+ if __name__ == '__main__':
83
+ prompt = "In order to make homemade bread, follow these steps:\n1)"
84
+ asyncio.run(print_response_stream(prompt))
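If the full reply is wanted as a single string rather than printed token by token, the `run()` generator above can simply be accumulated. A small sketch along those lines:

```python
# Collect the streamed chunks into one string instead of printing them.
async def collect_response(prompt):
    chunks = []
    async for chunk in run(prompt):
        chunks.append(chunk)
    return ''.join(chunks)

# Usage (same prompt as in __main__ above):
# text = asyncio.run(collect_response("In order to make homemade bread, follow these steps:\n1)"))
```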
api-examples/api-example.py ADDED
@@ -0,0 +1,61 @@
1
+ import requests
2
+
3
+ # For local streaming, the websockets are hosted without ssl - http://
4
+ HOST = 'localhost:5000'
5
+ URI = f'http://{HOST}/api/v1/generate'
6
+
7
+ # For reverse-proxied streaming, the remote will likely host with ssl - https://
8
+ # URI = 'https://your-uri-here.trycloudflare.com/api/v1/generate'
9
+
10
+
11
+ def run(prompt):
12
+ request = {
13
+ 'prompt': prompt,
14
+ 'max_new_tokens': 250,
15
+ 'auto_max_new_tokens': False,
16
+ 'max_tokens_second': 0,
17
+
18
+ # Generation params. If 'preset' is set to different than 'None', the values
19
+ # in presets/preset-name.yaml are used instead of the individual numbers.
20
+ 'preset': 'None',
21
+ 'do_sample': True,
22
+ 'temperature': 0.7,
23
+ 'top_p': 0.1,
24
+ 'typical_p': 1,
25
+ 'epsilon_cutoff': 0, # In units of 1e-4
26
+ 'eta_cutoff': 0, # In units of 1e-4
27
+ 'tfs': 1,
28
+ 'top_a': 0,
29
+ 'repetition_penalty': 1.18,
30
+ 'repetition_penalty_range': 0,
31
+ 'top_k': 40,
32
+ 'min_length': 0,
33
+ 'no_repeat_ngram_size': 0,
34
+ 'num_beams': 1,
35
+ 'penalty_alpha': 0,
36
+ 'length_penalty': 1,
37
+ 'early_stopping': False,
38
+ 'mirostat_mode': 0,
39
+ 'mirostat_tau': 5,
40
+ 'mirostat_eta': 0.1,
41
+ 'guidance_scale': 1,
42
+ 'negative_prompt': '',
43
+
44
+ 'seed': -1,
45
+ 'add_bos_token': True,
46
+ 'truncation_length': 2048,
47
+ 'ban_eos_token': False,
48
+ 'skip_special_tokens': True,
49
+ 'stopping_strings': []
50
+ }
51
+
52
+ response = requests.post(URI, json=request)
53
+
54
+ if response.status_code == 200:
55
+ result = response.json()['results'][0]['text']
56
+ print(prompt + result)
57
+
58
+
59
+ if __name__ == '__main__':
60
+ prompt = "In order to make homemade bread, follow these steps:\n1)"
61
+ run(prompt)
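The request above sends an empty `stopping_strings` list. As a rough sketch (assuming, as I understand it, that `stopping_strings` makes the server halt generation as soon as any listed string is produced, and that omitted generation parameters fall back to server defaults, as in `api-example-model.py`), a trimmed-down request that stops the recipe after the second step could look like this:

```python
import requests

def run_with_stops(prompt, stops, host='localhost:5000'):
    # Assumption: only 'prompt', 'max_new_tokens' and 'stopping_strings' are sent;
    # the server is expected to use its own defaults for everything else.
    request = {
        'prompt': prompt,
        'max_new_tokens': 250,
        'stopping_strings': stops,
    }
    response = requests.post(f'http://{host}/api/v1/generate', json=request)
    if response.status_code == 200:
        return response.json()['results'][0]['text']

# print(run_with_stops("In order to make homemade bread, follow these steps:\n1)", ["\n3)"]))
```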
characters/Example.png ADDED
characters/Example.yaml ADDED
@@ -0,0 +1,17 @@
1
+ name: Chiharu Yamada
2
+ greeting: |-
3
+ *Chiharu strides into the room with a smile, her eyes lighting up when she sees you. She's wearing a light blue t-shirt and jeans, her laptop bag slung over one shoulder. She takes a seat next to you, her enthusiasm palpable in the air*
4
+ Hey! I'm so excited to finally meet you. I've heard so many great things about you and I'm eager to pick your brain about computers. I'm sure you have a wealth of knowledge that I can learn from. *She grins, eyes twinkling with excitement* Let's get started!
5
+ context: |-
6
+ Chiharu Yamada's Persona: Chiharu Yamada is a young, computer engineer-nerd with a knack for problem solving and a passion for technology.
7
+
8
+ {{user}}: So how did you get into computer engineering?
9
+ {{char}}: I've always loved tinkering with technology since I was a kid.
10
+ {{user}}: That's really impressive!
11
+ {{char}}: *She chuckles bashfully* Thanks!
12
+ {{user}}: So what do you do when you're not working on computers?
13
+ {{char}}: I love exploring, going out with friends, watching movies, and playing video games.
14
+ {{user}}: What's your favorite type of computer hardware to work with?
15
+ {{char}}: Motherboards, they're like puzzles and the backbone of any system.
16
+ {{user}}: That sounds great!
17
+ {{char}}: Yeah, it's really fun. I'm lucky to be able to do this as a job.
convert-to-safetensors.py ADDED
@@ -0,0 +1,38 @@
1
+ '''
2
+
3
+ Converts a transformers model to safetensors format and shards it.
4
+
5
+ This makes it faster to load (because of safetensors) and lowers its RAM usage
6
+ while loading (because of sharding).
7
+
8
+ Based on the original script by 81300:
9
+
10
+ https://gist.github.com/81300/fe5b08bff1cba45296a829b9d6b0f303
11
+
12
+ '''
13
+
14
+ import argparse
15
+ from pathlib import Path
16
+
17
+ import torch
18
+ from transformers import AutoModelForCausalLM, AutoTokenizer
19
+
20
+ parser = argparse.ArgumentParser(formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=54))
21
+ parser.add_argument('MODEL', type=str, default=None, nargs='?', help="Path to the input model.")
22
+ parser.add_argument('--output', type=str, default=None, help='Path to the output folder (default: models/{model_name}_safetensors).')
23
+ parser.add_argument("--max-shard-size", type=str, default="2GB", help="Maximum size of a shard in GB or MB (default: %(default)s).")
24
+ parser.add_argument('--bf16', action='store_true', help='Load the model with bfloat16 precision. Requires NVIDIA Ampere GPU.')
25
+ args = parser.parse_args()
26
+
27
+ if __name__ == '__main__':
28
+ path = Path(args.MODEL)
29
+ model_name = path.name
30
+
31
+ print(f"Loading {model_name}...")
32
+ model = AutoModelForCausalLM.from_pretrained(path, low_cpu_mem_usage=True, torch_dtype=torch.bfloat16 if args.bf16 else torch.float16)
33
+ tokenizer = AutoTokenizer.from_pretrained(path)
34
+
35
+ out_folder = args.output or Path(f"models/{model_name}_safetensors")
36
+ print(f"Saving the converted model to {out_folder} with a maximum shard size of {args.max_shard_size}...")
37
+ model.save_pretrained(out_folder, max_shard_size=args.max_shard_size, safe_serialization=True)
38
+ tokenizer.save_pretrained(out_folder)
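The converted folder can then be loaded like any other Transformers model directory. A minimal sketch, with `models/my-model_safetensors` standing in for whatever output path the script produced:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder path: replace with the folder written by the script above.
out_folder = "models/my-model_safetensors"
model = AutoModelForCausalLM.from_pretrained(out_folder, low_cpu_mem_usage=True)
tokenizer = AutoTokenizer.from_pretrained(out_folder)
```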
css/chat_style-TheEncrypted777.css ADDED
@@ -0,0 +1,136 @@
1
+ /* All credits to TheEncrypted777: https://www.reddit.com/r/Oobabooga/comments/12xe6vq/updated_css_styling_with_color_customization_for/ */
2
+
3
+ .message {
4
+ display: grid;
5
+ grid-template-columns: 60px minmax(0, 1fr);
6
+ padding-bottom: 28px;
7
+ font-size: 18px;
8
+ /*Change 'Quicksand' to a font you like or leave it*/
9
+ font-family: Quicksand, Arial, sans-serif;
10
+ line-height: 1.428571429;
11
+ }
12
+
13
+ .circle-you,
14
+ .circle-bot {
15
+ background-color: gray;
16
+ border-radius: 1rem;
17
+ border: 2px solid white;
18
+ }
19
+
20
+ .circle-bot img,
21
+ .circle-you img {
22
+ border-radius: 10%;
23
+ width: 100%;
24
+ height: 100%;
25
+ object-fit: cover;
26
+ }
27
+
28
+ .circle-you, .circle-bot {
29
+ /*You can set the size of the profile images here, but if you do, you have to also adjust the .text{padding-left: 90px} to a different number according to the width of the image which is right below here*/
30
+ width: 135px;
31
+ height: 175px;
32
+ }
33
+
34
+ .text {
35
+ /*Change this to move the message box further left or right depending on the size of your profile pic*/
36
+ padding-left: 90px;
37
+ text-shadow: 2px 2px 2px rgb(0, 0, 0);
38
+ }
39
+
40
+ .text p {
41
+ margin-top: 2px;
42
+ }
43
+
44
+ .username {
45
+ padding-left: 10px;
46
+ font-size: 22px;
47
+ font-weight: bold;
48
+ border-top: 1px solid rgb(51, 64, 90);
49
+ padding: 3px;
50
+ }
51
+
52
+ .message-body {
53
+ position: relative;
54
+ border-radius: 1rem;
55
+ border: 1px solid rgba(255, 255, 255, 0.459);
56
+ border-radius: 10px;
57
+ padding: 10px;
58
+ padding-top: 5px;
59
+ /*Message gradient background color - remove the line below if you don't want a background color or gradient*/
60
+ background: linear-gradient(to bottom, #171730, #1b263f);
61
+ }
62
+
63
+ /*Adds 2 extra lines at the top and bottom of the message*/
64
+ .message-body:before,
65
+ .message-body:after {
66
+ content: "";
67
+ position: absolute;
68
+ left: 10px;
69
+ right: 10px;
70
+ height: 1px;
71
+ background-color: rgba(255, 255, 255, 0.13);
72
+ }
73
+
74
+ .message-body:before {
75
+ top: 6px;
76
+ }
77
+
78
+ .message-body:after {
79
+ bottom: 6px;
80
+ }
81
+
82
+ .message-body img {
83
+ max-width: 300px;
84
+ max-height: 300px;
85
+ border-radius: 20px;
86
+ }
87
+
88
+ .message-body p {
89
+ margin-bottom: 0 !important;
90
+ font-size: 18px !important;
91
+ line-height: 1.428571429 !important;
92
+ }
93
+
94
+ .dark .message-body p em {
95
+ color: rgb(138, 138, 138) !important;
96
+ }
97
+
98
+ .message-body p em {
99
+ color: rgb(110, 110, 110) !important;
100
+ }
101
+
102
+ @media screen and (max-width: 688px) {
103
+ .message {
104
+ display: grid;
105
+ grid-template-columns: 60px minmax(0, 1fr);
106
+ padding-bottom: 25px;
107
+ font-size: 15px;
108
+ font-family: Helvetica, Arial, sans-serif;
109
+ line-height: 1.428571429;
110
+ }
111
+
112
+ .circle-you, .circle-bot {
113
+ width: 50px;
114
+ height: 73px;
115
+ border-radius: 0.5rem;
116
+ }
117
+
118
+ .circle-bot img,
119
+ .circle-you img {
120
+ width: 100%;
121
+ height: 100%;
122
+ object-fit: cover;
123
+ }
124
+
125
+ .text {
126
+ padding-left: 0px;
127
+ }
128
+
129
+ .message-body p {
130
+ font-size: 16px !important;
131
+ }
132
+
133
+ .username {
134
+ font-size: 20px;
135
+ }
136
+ }
css/chat_style-cai-chat.css ADDED
@@ -0,0 +1,58 @@
1
+ .message {
2
+ display: grid;
3
+ grid-template-columns: 60px minmax(0, 1fr);
4
+ padding-bottom: 25px;
5
+ font-size: 15px;
6
+ font-family: Helvetica, Arial, sans-serif;
7
+ line-height: 1.428571429;
8
+ }
9
+
10
+ .circle-you {
11
+ width: 50px;
12
+ height: 50px;
13
+ background-color: rgb(238, 78, 59);
14
+ border-radius: 50%;
15
+ }
16
+
17
+ .circle-bot {
18
+ width: 50px;
19
+ height: 50px;
20
+ background-color: rgb(59, 78, 244);
21
+ border-radius: 50%;
22
+ }
23
+
24
+ .circle-bot img,
25
+ .circle-you img {
26
+ border-radius: 50%;
27
+ width: 100%;
28
+ height: 100%;
29
+ object-fit: cover;
30
+ }
31
+
32
+ .text p {
33
+ margin-top: 5px;
34
+ }
35
+
36
+ .username {
37
+ font-weight: bold;
38
+ }
39
+
40
+ .message-body img {
41
+ max-width: 300px;
42
+ max-height: 300px;
43
+ border-radius: 20px;
44
+ }
45
+
46
+ .message-body p {
47
+ margin-bottom: 0 !important;
48
+ font-size: 15px !important;
49
+ line-height: 1.428571429 !important;
50
+ }
51
+
52
+ .dark .message-body p em {
53
+ color: rgb(138, 138, 138) !important;
54
+ }
55
+
56
+ .message-body p em {
57
+ color: rgb(110, 110, 110) !important;
58
+ }
css/chat_style-messenger.css ADDED
@@ -0,0 +1,99 @@
1
+ .message {
2
+ padding-bottom: 25px;
3
+ font-size: 15px;
4
+ font-family: Helvetica, Arial, sans-serif;
5
+ line-height: 1.428571429;
6
+ }
7
+
8
+ .circle-you {
9
+ width: 50px;
10
+ height: 50px;
11
+ background-color: rgb(238, 78, 59);
12
+ border-radius: 50%;
13
+ }
14
+
15
+ .circle-bot {
16
+ width: 50px;
17
+ height: 50px;
18
+ background-color: rgb(59, 78, 244);
19
+ border-radius: 50%;
20
+ float: left;
21
+ margin-right: 10px;
22
+ margin-top: 5px;
23
+ }
24
+
25
+ .circle-bot img,
26
+ .circle-you img {
27
+ border-radius: 50%;
28
+ width: 100%;
29
+ height: 100%;
30
+ object-fit: cover;
31
+ }
32
+
33
+ .circle-you {
34
+ margin-top: 5px;
35
+ float: right;
36
+ }
37
+
38
+ .circle-bot + .text, .circle-you + .text {
39
+ border-radius: 18px;
40
+ padding: 8px 12px;
41
+ }
42
+
43
+ .circle-bot + .text {
44
+ background-color: #E4E6EB;
45
+ float: left;
46
+ }
47
+
48
+ .circle-you + .text {
49
+ float: right;
50
+ background-color: rgb(0, 132, 255);
51
+ margin-right: 10px;
52
+ }
53
+
54
+ .circle-you + .text div, .circle-you + .text *, .dark .circle-you + .text div, .dark .circle-you + .text * {
55
+ color: #FFF !important;
56
+ }
57
+
58
+ .circle-you + .text .username {
59
+ text-align: right;
60
+ }
61
+
62
+ .dark .circle-bot + .text div, .dark .circle-bot + .text * {
63
+ color: #000;
64
+ }
65
+
66
+ .text {
67
+ max-width: 80%;
68
+ }
69
+
70
+ .text p {
71
+ margin-top: 5px;
72
+ }
73
+
74
+ .username {
75
+ font-weight: bold;
76
+ }
77
+
78
+ .message-body {
79
+ }
80
+
81
+ .message-body img {
82
+ max-width: 300px;
83
+ max-height: 300px;
84
+ border-radius: 20px;
85
+ }
86
+
87
+ .message-body p {
88
+ margin-bottom: 0 !important;
89
+ font-size: 15px !important;
90
+ line-height: 1.428571429 !important;
91
+ }
92
+
93
+ .dark .message-body p em {
94
+ color: rgb(138, 138, 138) !important;
95
+ }
96
+
97
+ .message-body p em {
98
+ color: rgb(110, 110, 110) !important;
99
+ }
css/chat_style-wpp.css ADDED
@@ -0,0 +1,55 @@
1
+ .message {
2
+ padding-bottom: 25px;
3
+ font-size: 15px;
4
+ font-family: Helvetica, Arial, sans-serif;
5
+ line-height: 1.428571429;
6
+ }
7
+
8
+ .text-you {
9
+ background-color: #d9fdd3;
10
+ border-radius: 15px;
11
+ padding: 10px;
12
+ padding-top: 5px;
13
+ float: right;
14
+ }
15
+
16
+ .text-bot {
17
+ background-color: #f2f2f2;
18
+ border-radius: 15px;
19
+ padding: 10px;
20
+ padding-top: 5px;
21
+ }
22
+
23
+ .dark .text-you {
24
+ background-color: #005c4b;
25
+ color: #111b21;
26
+ }
27
+
28
+ .dark .text-bot {
29
+ background-color: #1f2937;
30
+ color: #111b21;
31
+ }
32
+
33
+ .text-bot p, .text-you p {
34
+ margin-top: 5px;
35
+ }
36
+
37
+ .message-body img {
38
+ max-width: 300px;
39
+ max-height: 300px;
40
+ border-radius: 20px;
41
+ }
42
+
43
+ .message-body p {
44
+ margin-bottom: 0 !important;
45
+ font-size: 15px !important;
46
+ line-height: 1.428571429 !important;
47
+ }
48
+
49
+ .dark .message-body p em {
50
+ color: rgb(138, 138, 138) !important;
51
+ }
52
+
53
+ .message-body p em {
54
+ color: rgb(110, 110, 110) !important;
55
+ }
css/html_4chan_style.css ADDED
@@ -0,0 +1,104 @@
1
+ #parent #container {
2
+ background-color: #eef2ff;
3
+ padding: 17px;
4
+ }
5
+
6
+ #parent #container .reply {
7
+ background-color: rgb(214, 218, 240);
8
+ border-bottom-color: rgb(183, 197, 217);
9
+ border-bottom-style: solid;
10
+ border-bottom-width: 1px;
11
+ border-image-outset: 0;
12
+ border-image-repeat: stretch;
13
+ border-image-slice: 100%;
14
+ border-image-source: none;
15
+ border-image-width: 1;
16
+ border-left-color: rgb(0, 0, 0);
17
+ border-left-style: none;
18
+ border-left-width: 0px;
19
+ border-right-color: rgb(183, 197, 217);
20
+ border-right-style: solid;
21
+ border-right-width: 1px;
22
+ border-top-color: rgb(0, 0, 0);
23
+ border-top-style: none;
24
+ border-top-width: 0px;
25
+ color: rgb(0, 0, 0);
26
+ display: table;
27
+ font-family: arial, helvetica, sans-serif;
28
+ font-size: 13.3333px;
29
+ margin-bottom: 4px;
30
+ margin-left: 0px;
31
+ margin-right: 0px;
32
+ margin-top: 4px;
33
+ overflow-x: hidden;
34
+ overflow-y: hidden;
35
+ padding-bottom: 4px;
36
+ padding-left: 2px;
37
+ padding-right: 2px;
38
+ padding-top: 4px;
39
+ }
40
+
41
+ #parent #container .number {
42
+ color: rgb(0, 0, 0);
43
+ font-family: arial, helvetica, sans-serif;
44
+ font-size: 13.3333px;
45
+ width: 342.65px;
46
+ margin-right: 7px;
47
+ }
48
+
49
+ #parent #container .op {
50
+ color: rgb(0, 0, 0);
51
+ font-family: arial, helvetica, sans-serif;
52
+ font-size: 13.3333px;
53
+ margin-bottom: 8px;
54
+ margin-left: 0px;
55
+ margin-right: 0px;
56
+ margin-top: 4px;
57
+ overflow-x: hidden;
58
+ overflow-y: hidden;
59
+ }
60
+
61
+ #parent #container .op blockquote {
62
+ margin-left: 0px !important;
63
+ }
64
+
65
+ #parent #container .name {
66
+ color: rgb(17, 119, 67);
67
+ font-family: arial, helvetica, sans-serif;
68
+ font-size: 13.3333px;
69
+ font-weight: 700;
70
+ margin-left: 7px;
71
+ }
72
+
73
+ #parent #container .quote {
74
+ color: rgb(221, 0, 0);
75
+ font-family: arial, helvetica, sans-serif;
76
+ font-size: 13.3333px;
77
+ text-decoration-color: rgb(221, 0, 0);
78
+ text-decoration-line: underline;
79
+ text-decoration-style: solid;
80
+ text-decoration-thickness: auto;
81
+ }
82
+
83
+ #parent #container .greentext {
84
+ color: rgb(120, 153, 34);
85
+ font-family: arial, helvetica, sans-serif;
86
+ font-size: 13.3333px;
87
+ }
88
+
89
+ #parent #container blockquote {
90
+ margin: 0px !important;
91
+ margin-block-start: 1em;
92
+ margin-block-end: 1em;
93
+ margin-inline-start: 40px;
94
+ margin-inline-end: 40px;
95
+ margin-top: 13.33px !important;
96
+ margin-bottom: 13.33px !important;
97
+ margin-left: 40px !important;
98
+ margin-right: 40px !important;
99
+ }
100
+
101
+ #parent #container .message_4chan {
102
+ color: black;
103
+ border: none;
104
+ }
css/html_instruct_style.css ADDED
@@ -0,0 +1,66 @@
1
+ .message {
2
+ display: grid;
3
+ grid-template-columns: 60px 1fr;
4
+ padding-bottom: 25px;
5
+ font-size: 15px;
6
+ font-family: Helvetica, Arial, sans-serif;
7
+ line-height: 1.428571429;
8
+ }
9
+
10
+ .username {
11
+ display: none;
12
+ }
13
+
14
+ .message-body p {
15
+ font-size: 15px !important;
16
+ line-height: 1.4 !important;
17
+ margin-bottom: 1.25em !important;
18
+ }
19
+
20
+ .message-body ul, .message-body ol {
21
+ margin-bottom: 1.25em !important;
22
+ }
23
+
24
+ .dark .message-body p em {
25
+ color: rgb(198, 202, 214) !important;
26
+ }
27
+
28
+ .message-body p em {
29
+ color: rgb(110, 110, 110) !important;
30
+ }
31
+
32
+ .gradio-container .chat .assistant-message {
33
+ padding: 15px;
34
+ border-radius: 20px;
35
+ background-color: #0000000f;
36
+ margin-top: 9px !important;
37
+ margin-bottom: 18px !important;
38
+ }
39
+
40
+ .gradio-container .chat .user-message {
41
+ padding: 15px;
42
+ border-radius: 20px;
43
+ margin-bottom: 9px !important;
44
+ }
45
+
46
+ .gradio-container .chat .assistant-message:last-child, .gradio-container .chat .user-message:last-child {
47
+ margin-bottom: 0px !important;
48
+ }
49
+
50
+ .dark .chat .assistant-message {
51
+ background-color: #1f2937;
52
+ border: 1px solid #4b5563;
53
+ }
54
+
55
+ .dark .chat .user-message {
56
+ background-color: #111827;
57
+ border: 1px solid #4b5563;
58
+ }
59
+
60
+ code {
61
+ background-color: white !important;
62
+ }
63
+
64
+ .dark code {
65
+ background-color: #0e1321 !important;
66
+ }
css/html_readable_style.css ADDED
@@ -0,0 +1,33 @@
1
+ .container {
2
+ max-width: 600px;
3
+ margin-left: auto;
4
+ margin-right: auto;
5
+ background-color: rgb(31, 41, 55);
6
+ padding: 3em;
7
+ word-break: break-word;
8
+ overflow-wrap: anywhere;
9
+ color: #efefef !important;
10
+ }
11
+
12
+ .container p, .container li {
13
+ font-size: 16px !important;
14
+ color: #efefef !important;
15
+ margin-bottom: 22px;
16
+ line-height: 1.4 !important;
17
+ }
18
+
19
+ .container li > p {
20
+ display: inline !important;
21
+ }
22
+
23
+ .container code {
24
+ overflow-x: auto;
25
+ }
26
+
27
+ .container :not(pre) > code {
28
+ white-space: normal !important;
29
+ }
30
+
31
+ .container .hoverable {
32
+ font-size: 14px;
33
+ }
css/main.css ADDED
@@ -0,0 +1,470 @@
1
+ .tabs.svelte-710i53 {
2
+ margin-top: 0
3
+ }
4
+
5
+ .py-6 {
6
+ padding-top: 2.5rem
7
+ }
8
+
9
+ .small-button {
10
+ min-width: 0 !important;
11
+ max-width: 171px;
12
+ height: 39.594px;
13
+ align-self: end;
14
+ }
15
+
16
+ .refresh-button {
17
+ max-width: 4.4em;
18
+ min-width: 2.2em !important;
19
+ height: 39.594px;
20
+ align-self: end;
21
+ line-height: 1em;
22
+ border-radius: 0.5em;
23
+ flex: none;
24
+ }
25
+
26
+ .refresh-button-small {
27
+ max-width: 2.2em;
28
+ }
29
+
30
+ .button_nowrap {
31
+ white-space: nowrap;
32
+ }
33
+
34
+ #slim-column {
35
+ flex: none !important;
36
+ min-width: 0 !important;
37
+ }
38
+
39
+ .slim-dropdown {
40
+ background-color: transparent !important;
41
+ border: none !important;
42
+ padding: 0 !important;
43
+ }
44
+
45
+ #download-label, #upload-label {
46
+ min-height: 0
47
+ }
48
+
49
+ .dark svg {
50
+ fill: white;
51
+ }
52
+
53
+ .dark a {
54
+ color: white !important;
55
+ }
56
+
57
+ ol li p, ul li p {
58
+ display: inline-block;
59
+ }
60
+
61
+ #chat-tab, #default-tab, #notebook-tab, #parameters, #chat-settings, #lora, #training-tab, #model-tab, #session-tab {
62
+ border: 0;
63
+ }
64
+
65
+ .gradio-container-3-18-0 .prose * h1, h2, h3, h4 {
66
+ color: white;
67
+ }
68
+
69
+ .gradio-container {
70
+ max-width: 100% !important;
71
+ padding-top: 0 !important;
72
+ }
73
+
74
+ #extensions {
75
+ margin-top: 5px;
76
+ margin-bottom: 35px;
77
+ }
78
+
79
+ .extension-tab {
80
+ border: 0 !important;
81
+ }
82
+
83
+ span.math.inline {
84
+ font-size: 27px;
85
+ vertical-align: baseline !important;
86
+ }
87
+
88
+ div.svelte-15lo0d8 > *, div.svelte-15lo0d8 > .form > * {
89
+ flex-wrap: nowrap;
90
+ }
91
+
92
+ .header_bar {
93
+ background-color: #f7f7f7;
94
+ margin-bottom: 20px;
95
+ display: inline !important;
96
+ overflow-x: scroll;
97
+ }
98
+
99
+ .dark .header_bar {
100
+ border: none !important;
101
+ background-color: #8080802b;
102
+ }
103
+
104
+ .header_bar button.selected {
105
+ border-radius: 0;
106
+ }
107
+
108
+ .textbox_default textarea {
109
+ height: calc(100dvh - 280px);
110
+ }
111
+
112
+ .textbox_default_output textarea {
113
+ height: calc(100dvh - 190px);
114
+ }
115
+
116
+ .textbox textarea {
117
+ height: calc(100dvh - 241px);
118
+ }
119
+
120
+ .textbox_logits textarea {
121
+ height: calc(100dvh - 241px);
122
+ }
123
+
124
+ .textbox_logits_notebook textarea {
125
+ height: calc(100dvh - 292px);
126
+ }
127
+
128
+ .textbox_default textarea,
129
+ .textbox_default_output textarea,
130
+ .textbox_logits textarea,
131
+ .textbox_logits_notebook textarea,
132
+ .textbox textarea
133
+ {
134
+ font-size: 16px !important;
135
+ color: #46464A !important;
136
+ }
137
+
138
+ .dark textarea {
139
+ color: #efefef !important;
140
+ }
141
+
142
+ @media screen and (max-width: 711px) {
143
+ .textbox_default textarea {
144
+ height: calc(100dvh - 271px);
145
+ }
146
+
147
+ div .default-token-counter {
148
+ top: calc( 0.5 * (100dvh - 245px) ) !important;
149
+ }
150
+ }
151
+
152
+ /* Hide the gradio footer*/
153
+ footer {
154
+ display: none !important;
155
+ }
156
+
157
+ button {
158
+ font-size: 14px !important;
159
+ }
160
+
161
+ .file-saver {
162
+ position: fixed !important;
163
+ top: 50%;
164
+ left: 50%;
165
+ transform: translate(-50%, -50%); /* center horizontally */
166
+ max-width: 500px;
167
+ background-color: var(--input-background-fill);
168
+ border: 2px solid black !important;
169
+ z-index: 1000;
170
+ }
171
+
172
+ .dark .file-saver {
173
+ border: 2px solid white !important;
174
+ }
175
+
176
+ .checkboxgroup-table label {
177
+ background: none !important;
178
+ padding: 0 !important;
179
+ border: 0 !important;
180
+ }
181
+
182
+ .checkboxgroup-table div {
183
+ display: grid !important;
184
+ }
185
+
186
+ .markdown ul ol {
187
+ font-size: 100% !important;
188
+ }
189
+
190
+ .pretty_scrollbar::-webkit-scrollbar {
191
+ width: 5px;
192
+ }
193
+
194
+ .pretty_scrollbar::-webkit-scrollbar-track {
195
+ background: transparent;
196
+ }
197
+
198
+ .pretty_scrollbar::-webkit-scrollbar-thumb,
199
+ .pretty_scrollbar::-webkit-scrollbar-thumb:hover {
200
+ background: #c5c5d2;
201
+ }
202
+
203
+ .dark .pretty_scrollbar::-webkit-scrollbar-thumb,
204
+ .dark .pretty_scrollbar::-webkit-scrollbar-thumb:hover {
205
+ background: #374151;
206
+ }
207
+
208
+ .pretty_scrollbar::-webkit-resizer {
209
+ background: #c5c5d2;
210
+ }
211
+
212
+ .dark .pretty_scrollbar::-webkit-resizer {
213
+ background: #374151;
214
+ }
215
+
216
+ audio {
217
+ max-width: 100%;
218
+ }
219
+
220
+ /* Copied from https://github.com/AUTOMATIC1111/stable-diffusion-webui */
221
+ .token-counter {
222
+ position: absolute !important;
223
+ top: calc( 0.5 * (100dvh - 215px) ) !important;
224
+ right: 2px;
225
+ z-index: 100;
226
+ background: var(--input-background-fill) !important;
227
+ min-height: 0 !important;
228
+ }
229
+
230
+ .default-token-counter {
231
+ top: calc( 0.5 * (100dvh - 255px) ) !important;
232
+ }
233
+
234
+ .token-counter span {
235
+ padding: 1px;
236
+ box-shadow: 0 0 0 0.3em rgba(192,192,192,0.15), inset 0 0 0.6em rgba(192,192,192,0.075);
237
+ border: 2px solid rgba(192,192,192,0.4) !important;
238
+ border-radius: 0.4em;
239
+ }
240
+
241
+ .no-background {
242
+ background: var(--background-fill-primary) !important;
243
+ padding: 0px !important;
244
+ }
245
+
246
+ /*****************************************************/
247
+ /*************** Chat UI declarations ****************/
248
+ /*****************************************************/
249
+
250
+ .h-\[40vh\], .wrap.svelte-byatnx.svelte-byatnx.svelte-byatnx {
251
+ height: 66.67vh
252
+ }
253
+
254
+ .gradio-container {
255
+ margin-left: auto !important;
256
+ margin-right: auto !important;
257
+ }
258
+
259
+ .w-screen {
260
+ width: unset
261
+ }
262
+
263
+ div.svelte-362y77>*, div.svelte-362y77>.form>* {
264
+ flex-wrap: nowrap
265
+ }
266
+
267
+ .pending.svelte-1ed2p3z {
268
+ opacity: 1;
269
+ }
270
+
271
+ .wrap.svelte-6roggh.svelte-6roggh {
272
+ max-height: 92.5%;
273
+ }
274
+
275
+ /* This is for the microphone button in the whisper extension */
276
+ .sm.svelte-1ipelgc {
277
+ width: 100%;
278
+ }
279
+
280
+ #chat-tab button, #notebook-tab button, #default-tab button {
281
+ min-width: 0 !important;
282
+ }
283
+
284
+ #chat-tab > :first-child, #extensions {
285
+ max-width: 800px;
286
+ margin-left: auto;
287
+ margin-right: auto;
288
+ }
289
+
290
+ @media screen and (max-width: 688px) {
291
+ #chat-tab {
292
+ padding: 0px;
293
+ }
294
+
295
+ .chat-parent {
296
+ height: calc(100dvh - 262px) !important;
297
+ }
298
+
299
+ .bigchat {
300
+ height: calc(100dvh - 180px) !important;
301
+ }
302
+ }
303
+
304
+ .chat {
305
+ margin-left: auto;
306
+ margin-right: auto;
307
+ max-width: 800px;
308
+ height: 100%;
309
+ overflow-y: auto;
310
+ padding-right: 15px;
311
+ display: flex;
312
+ flex-direction: column;
313
+ word-break: break-word;
314
+ overflow-wrap: anywhere;
315
+ padding-top: 6px;
316
+ }
317
+
318
+ .chat-parent {
319
+ height: calc(100dvh - 272px);
320
+ overflow: auto !important;
321
+ }
322
+
323
+ .bigchat {
324
+ height: calc(100dvh - 200px);
325
+ }
326
+
327
+ .chat > .messages {
328
+ display: flex;
329
+ flex-direction: column;
330
+ }
331
+
332
+ .chat .message:last-child {
333
+ margin-bottom: 0px !important;
334
+ padding-bottom: 0px !important;
335
+ }
336
+
337
+ .message-body li {
338
+ margin-top: 0 !important;
339
+ margin-bottom: 0 !important;
340
+ }
341
+
342
+ .message-body li > p {
343
+ display: inline !important;
344
+ }
345
+
346
+ .message-body ul, .message-body ol {
347
+ font-size: 15px !important;
348
+ }
349
+
350
+ .message-body ul {
351
+ list-style-type: disc !important;
352
+ }
353
+
354
+ .message-body pre {
355
+ margin-bottom: 1.25em !important;
356
+ }
357
+
358
+ .message-body code {
359
+ white-space: pre-wrap !important;
360
+ word-wrap: break-word !important;
361
+ }
362
+
363
+ .message-body :not(pre) > code {
364
+ white-space: normal !important;
365
+ }
366
+
367
+ #chat-input {
368
+ padding: 0;
369
+ padding-top: 18px;
370
+ background: var(--background-fill-primary);
371
+ border: none;
372
+ }
373
+
374
+ #chat-input textarea:focus {
375
+ box-shadow: none !important;
376
+ }
377
+
378
+ @media print {
379
+ body {
380
+ visibility: hidden;
381
+ }
382
+
383
+ .chat {
384
+ visibility: visible;
385
+ position: absolute;
386
+ left: 0;
387
+ top: 0;
388
+ max-width: unset;
389
+ max-height: unset;
390
+ width: 100%;
391
+ overflow-y: visible;
392
+ }
393
+
394
+ .message {
395
+ break-inside: avoid;
396
+ }
397
+
398
+ .gradio-container {
399
+ overflow: visible;
400
+ }
401
+
402
+ .tab-nav {
403
+ display: none !important;
404
+ }
405
+
406
+ #chat-tab > :first-child {
407
+ max-width: unset;
408
+ }
409
+ }
410
+
411
+ #show-controls {
412
+ position: absolute;
413
+ background-color: transparent;
414
+ left: calc(100% - 180px);
415
+ }
416
+
417
+ #typing-container {
418
+ display: none;
419
+ position: absolute;
420
+ background-color: transparent;
421
+ left: -2px;
422
+ padding: var(--block-padding);
423
+ }
424
+
425
+ .typing {
426
+ position: relative;
427
+ }
428
+
429
+ .visible-dots #typing-container {
430
+ display: block;
431
+ }
432
+
433
+ .typing span {
434
+ content: '';
435
+ animation: blink 1.5s infinite;
436
+ animation-fill-mode: both;
437
+ height: 10px;
438
+ width: 10px;
439
+ background: #3b5998;
440
+ position: absolute;
441
+ left: 0;
442
+ top: 0;
443
+ border-radius: 50%;
444
+ }
445
+
446
+ .typing .dot1 {
447
+ animation-delay: .2s;
448
+ margin-left: calc(10px * 1.5);
449
+ }
450
+
451
+ .typing .dot2 {
452
+ animation-delay: .4s;
453
+ margin-left: calc(10px * 3);
454
+ }
455
+
456
+ @keyframes blink {
457
+ 0% {
458
+ opacity: .1;
459
+ }
460
+ 20% {
461
+ opacity: 1;
462
+ }
463
+ 100% {
464
+ opacity: .1;
465
+ }
466
+ }
467
+
468
+ #chat-tab .generating {
469
+ display: none !important;
470
+ }
docker-compose.yml ADDED
@@ -0,0 +1,33 @@
1
+ version: "3.3"
2
+ services:
3
+ text-generation-webui:
4
+ build:
5
+ context: .
6
+ args:
7
+ # specify which cuda version your card supports: https://developer.nvidia.com/cuda-gpus
8
+ TORCH_CUDA_ARCH_LIST: ${TORCH_CUDA_ARCH_LIST:-7.5}
9
+ WEBUI_VERSION: ${WEBUI_VERSION:-HEAD}
10
+ env_file: .env
11
+ ports:
12
+ - "${HOST_PORT:-7860}:${CONTAINER_PORT:-7860}"
13
+ - "${HOST_API_PORT:-5000}:${CONTAINER_API_PORT:-5000}"
14
+ - "${HOST_API_STREAM_PORT:-5005}:${CONTAINER_API_STREAM_PORT:-5005}"
15
+ stdin_open: true
16
+ tty: true
17
+ volumes:
18
+ - ./characters:/app/characters
19
+ - ./extensions:/app/extensions
20
+ - ./loras:/app/loras
21
+ - ./models:/app/models
22
+ - ./presets:/app/presets
23
+ - ./prompts:/app/prompts
24
+ - ./softprompts:/app/softprompts
25
+ - ./training:/app/training
26
+ - ./cloudflared:/etc/cloudflared
27
+ deploy:
28
+ resources:
29
+ reservations:
30
+ devices:
31
+ - driver: nvidia
32
+ device_ids: ['0']
33
+ capabilities: [gpu]
docker/.dockerignore ADDED
@@ -0,0 +1,9 @@
1
+ .env
2
+ Dockerfile
3
+ /characters
4
+ /loras
5
+ /models
6
+ /presets
7
+ /prompts
8
+ /softprompts
9
+ /training
docker/.env.example ADDED
@@ -0,0 +1,30 @@
1
+ # by default the Dockerfile specifies these versions: 3.5;5.0;6.0;6.1;7.0;7.5;8.0;8.6+PTX
2
+ # however, for it to work I had to specify the exact version for my card (a 2060): it was 7.5
3
+ # you can find the version for your card here: https://developer.nvidia.com/cuda-gpus
4
+ TORCH_CUDA_ARCH_LIST=7.5
5
+
6
+ # these commands worked for me with roughly 4.5GB of VRAM
7
+ CLI_ARGS=--model llama-7b-4bit --wbits 4 --listen --auto-devices
8
+
9
+ # the following examples have been tested with the files linked in docs/README_docker.md:
10
+ # example running 13b with 4bit/128 groupsize : CLI_ARGS=--model llama-13b-4bit-128g --wbits 4 --listen --groupsize 128 --pre_layer 25
11
+ # example with loading api extension and public share: CLI_ARGS=--model llama-7b-4bit --wbits 4 --listen --auto-devices --no-stream --extensions api --share
12
+ # example running 7b with 8bit groupsize : CLI_ARGS=--model llama-7b --load-in-8bit --listen --auto-devices
13
+
14
+ # the port the webui binds to on the host
15
+ HOST_PORT=7860
16
+ # the port the webui binds to inside the container
17
+ CONTAINER_PORT=7860
18
+
19
+ # the port the api binds to on the host
20
+ HOST_API_PORT=5000
21
+ # the port the api binds to inside the container
22
+ CONTAINER_API_PORT=5000
23
+
24
+ # the port the api stream endpoint binds to on the host
25
+ HOST_API_STREAM_PORT=5005
26
+ # the port the api stream endpoint binds to inside the container
27
+ CONTAINER_API_STREAM_PORT=5005
28
+
29
+ # the version used to install text-generation-webui from
30
+ WEBUI_VERSION=HEAD
docker/Dockerfile ADDED
@@ -0,0 +1,72 @@
1
+ FROM nvidia/cuda:11.8.0-devel-ubuntu22.04 as builder
2
+
3
+ RUN apt-get update && \
4
+ apt-get install --no-install-recommends -y git vim build-essential python3-dev python3-venv && \
5
+ rm -rf /var/lib/apt/lists/*
6
+
7
+ RUN git clone https://github.com/oobabooga/GPTQ-for-LLaMa /build
8
+
9
+ WORKDIR /build
10
+
11
+ RUN python3 -m venv /build/venv
12
+ RUN . /build/venv/bin/activate && \
13
+ pip3 install --upgrade pip setuptools wheel && \
14
+ pip3 install torch torchvision torchaudio && \
15
+ pip3 install -r requirements.txt
16
+
17
+ # https://developer.nvidia.com/cuda-gpus
18
+ # for a rtx 2060: ARG TORCH_CUDA_ARCH_LIST="7.5"
19
+ ARG TORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST:-3.5;5.0;6.0;6.1;7.0;7.5;8.0;8.6+PTX}"
20
+ RUN . /build/venv/bin/activate && \
21
+ python3 setup_cuda.py bdist_wheel -d .
22
+
23
+ FROM nvidia/cuda:11.8.0-runtime-ubuntu22.04
24
+
25
+ LABEL maintainer="Your Name <your.email@example.com>"
26
+ LABEL description="Docker image for GPTQ-for-LLaMa and Text Generation WebUI"
27
+
28
+ RUN apt-get update && \
29
+ apt-get install --no-install-recommends -y python3-dev libportaudio2 libasound-dev git python3 python3-pip make g++ ffmpeg && \
30
+ rm -rf /var/lib/apt/lists/*
31
+
32
+ RUN --mount=type=cache,target=/root/.cache/pip pip3 install virtualenv
33
+ RUN mkdir /app
34
+
35
+ WORKDIR /app
36
+
37
+ ARG WEBUI_VERSION
38
+ RUN test -n "${WEBUI_VERSION}" && git reset --hard ${WEBUI_VERSION} || echo "Using provided webui source"
39
+
40
+ RUN virtualenv /app/venv
41
+ RUN . /app/venv/bin/activate && \
42
+ pip3 install --upgrade pip setuptools wheel && \
43
+ pip3 install torch torchvision torchaudio
44
+
45
+ COPY --from=builder /build /app/repositories/GPTQ-for-LLaMa
46
+ RUN . /app/venv/bin/activate && \
47
+ pip3 install /app/repositories/GPTQ-for-LLaMa/*.whl
48
+
49
+ COPY extensions/api/requirements.txt /app/extensions/api/requirements.txt
50
+ COPY extensions/elevenlabs_tts/requirements.txt /app/extensions/elevenlabs_tts/requirements.txt
51
+ COPY extensions/google_translate/requirements.txt /app/extensions/google_translate/requirements.txt
52
+ COPY extensions/silero_tts/requirements.txt /app/extensions/silero_tts/requirements.txt
53
+ COPY extensions/whisper_stt/requirements.txt /app/extensions/whisper_stt/requirements.txt
54
+ COPY extensions/superbooga/requirements.txt /app/extensions/superbooga/requirements.txt
55
+ COPY extensions/openai/requirements.txt /app/extensions/openai/requirements.txt
56
+ RUN --mount=type=cache,target=/root/.cache/pip . /app/venv/bin/activate && cd extensions/api && pip3 install -r requirements.txt
57
+ RUN --mount=type=cache,target=/root/.cache/pip . /app/venv/bin/activate && cd extensions/elevenlabs_tts && pip3 install -r requirements.txt
58
+ RUN --mount=type=cache,target=/root/.cache/pip . /app/venv/bin/activate && cd extensions/google_translate && pip3 install -r requirements.txt
59
+ RUN --mount=type=cache,target=/root/.cache/pip . /app/venv/bin/activate && cd extensions/silero_tts && pip3 install -r requirements.txt
60
+ RUN --mount=type=cache,target=/root/.cache/pip . /app/venv/bin/activate && cd extensions/whisper_stt && pip3 install -r requirements.txt
61
+ RUN --mount=type=cache,target=/root/.cache/pip . /app/venv/bin/activate && cd extensions/superbooga && pip3 install -r requirements.txt
62
+ RUN --mount=type=cache,target=/root/.cache/pip . /app/venv/bin/activate && cd extensions/openai && pip3 install -r requirements.txt
63
+
64
+ COPY requirements.txt /app/requirements.txt
65
+ RUN . /app/venv/bin/activate && \
66
+ pip3 install -r requirements.txt
67
+
68
+ RUN cp /app/venv/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda118.so /app/venv/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cpu.so
69
+
70
+ COPY . /app/
71
+ ENV CLI_ARGS=""
72
+ CMD . /app/venv/bin/activate && python3 server.py ${CLI_ARGS}
docker/docker-compose.yml ADDED
@@ -0,0 +1,33 @@
1
+ version: "3.3"
2
+ services:
3
+ text-generation-webui:
4
+ build:
5
+ context: .
6
+ args:
7
+ # specify which cuda version your card supports: https://developer.nvidia.com/cuda-gpus
8
+ TORCH_CUDA_ARCH_LIST: ${TORCH_CUDA_ARCH_LIST:-7.5}
9
+ WEBUI_VERSION: ${WEBUI_VERSION:-HEAD}
10
+ env_file: .env
11
+ ports:
12
+ - "${HOST_PORT:-7860}:${CONTAINER_PORT:-7860}"
13
+ - "${HOST_API_PORT:-5000}:${CONTAINER_API_PORT:-5000}"
14
+ - "${HOST_API_STREAM_PORT:-5005}:${CONTAINER_API_STREAM_PORT:-5005}"
15
+ stdin_open: true
16
+ tty: true
17
+ volumes:
18
+ - ./characters:/app/characters
19
+ - ./extensions:/app/extensions
20
+ - ./loras:/app/loras
21
+ - ./models:/app/models
22
+ - ./presets:/app/presets
23
+ - ./prompts:/app/prompts
24
+ - ./softprompts:/app/softprompts
25
+ - ./training:/app/training
26
+ - ./cloudflared:/etc/cloudflared
27
+ deploy:
28
+ resources:
29
+ reservations:
30
+ devices:
31
+ - driver: nvidia
32
+ device_ids: ['0']
33
+ capabilities: [gpu]
docs/Audio-Notification.md ADDED
@@ -0,0 +1,14 @@
1
+ # Audio notification
2
+
3
+ If your computer takes a long time to generate each response for the model that you are using, you can enable an audio notification for when the response is completed. This feature was kindly contributed by HappyWorldGames in [#1277](https://github.com/oobabooga/text-generation-webui/pull/1277).
4
+
5
+ ### Installation
6
+
7
+ Simply place a file called "notification.mp3" in the same folder as `server.py`. Here you can find some examples:
8
+
9
+ * https://pixabay.com/sound-effects/search/ding/?duration=0-30
10
+ * https://pixabay.com/sound-effects/search/notification/?duration=0-30
11
+
12
+ Source: https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/1126
13
+
14
+ This file will be automatically detected the next time you start the web UI.
docs/Chat-mode.md ADDED
@@ -0,0 +1,39 @@
1
+ ## Chat characters
2
+
3
+ Custom chat mode characters are defined by `.yaml` files inside the `characters` folder. An example is included: [Example.yaml](https://github.com/oobabooga/text-generation-webui/blob/main/characters/Example.yaml).
4
+
5
+ The following fields may be defined:
6
+
7
+ | Field | Description |
8
+ |-------|-------------|
9
+ | `name` or `bot` | The character's name. |
10
+ | `context` | A string that appears at the top of the prompt. It usually contains a description of the character's personality and a few example messages. |
11
+ | `greeting` (optional) | The character's opening message. It appears when the character is first loaded or when the history is cleared. |
12
+ | `your_name` or `user` (optional) | Your name. This overwrites what you had previously written in the `Your name` field in the interface. |
13
+
14
+ #### Special tokens
15
+
16
+ The following replacements happen when the prompt is generated, and they apply to the `context` and `greeting` fields:
17
+
18
+ * `{{char}}` and `<BOT>` get replaced with the character's name.
19
+ * `{{user}}` and `<USER>` get replaced with your name.
20
+
21
+ #### How do I add a profile picture for my character?
22
+
23
+ Put an image with the same name as your character's `.yaml` file into the `characters` folder. For example, if your bot is `Character.yaml`, add `Character.jpg` or `Character.png` to the folder.
24
+
25
+ #### Is the chat history truncated in the prompt?
26
+
27
+ Once your prompt reaches the `truncation_length` parameter (2048 by default), old messages will be removed one at a time. The context string will always stay at the top of the prompt and will never get truncated.
28
+
29
+ ## Chat styles
30
+
31
+ Custom chat styles can be defined in the `text-generation-webui/css` folder. Simply create a new file with a name starting with `chat_style-` and ending with `.css`, and it will automatically appear in the "Chat style" dropdown menu in the interface. Examples:
32
+
33
+ ```
34
+ chat_style-cai-chat.css
35
+ chat_style-TheEncrypted777.css
36
+ chat_style-wpp.css
37
+ ```
38
+
39
+ You should use the same class names as in `chat_style-cai-chat.css` in your custom style.
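As a rough illustration of the replacements described under "Special tokens" above (not the web UI's actual implementation), the substitution amounts to something like this:

```python
# Illustrative sketch of the {{char}}/{{user}} replacement described above;
# the web UI's real implementation may differ.
def apply_character_tokens(text, char_name, user_name):
    for token in ('{{char}}', '<BOT>'):
        text = text.replace(token, char_name)
    for token in ('{{user}}', '<USER>'):
        text = text.replace(token, user_name)
    return text

greeting = apply_character_tokens("Hi {{user}}, I'm {{char}}!", "Chiharu Yamada", "You")
```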
docs/DeepSpeed.md ADDED
@@ -0,0 +1,24 @@
1
+ An alternative way of reducing the GPU memory usage of models is to use the `DeepSpeed ZeRO-3` optimization.
2
+
3
+ With this, I have been able to load a 6b model (GPT-J 6B) with less than 6GB of VRAM. The speed of text generation is very decent and much better than what would be accomplished with `--auto-devices --gpu-memory 6`.
4
+
5
+ As far as I know, DeepSpeed is only available for Linux at the moment.
6
+
7
+ ### How to use it
8
+
9
+ 1. Install DeepSpeed:
10
+
11
+ ```
12
+ conda install -c conda-forge mpi4py mpich
13
+ pip install -U deepspeed
14
+ ```
15
+
16
+ 2. Start the web UI replacing `python` with `deepspeed --num_gpus=1` and adding the `--deepspeed` flag. Example:
17
+
18
+ ```
19
+ deepspeed --num_gpus=1 server.py --deepspeed --chat --model gpt-j-6B
20
+ ```
21
+
22
+ ### Learn more
23
+
24
+ For more information, check out [this comment](https://github.com/oobabooga/text-generation-webui/issues/40#issuecomment-1412038622) by 81300, who came up with the DeepSpeed support in this web UI.
docs/Docker.md ADDED
@@ -0,0 +1,203 @@
1
+ Docker Compose is a way of installing and launching the web UI in an isolated Ubuntu image using only a few commands.
2
+
3
+ In order to create the image as described in the main README, you must have docker compose 2.17 or higher:
4
+
5
+ ```
6
+ ~$ docker compose version
7
+ Docker Compose version v2.17.2
8
+ ```
9
+
10
+ Make sure to also create the necessary symbolic links:
11
+
12
+ ```
13
+ cd text-generation-webui
14
+ ln -s docker/{Dockerfile,docker-compose.yml,.dockerignore} .
15
+ cp docker/.env.example .env
16
+ # Edit .env and set TORCH_CUDA_ARCH_LIST based on your GPU model
17
+ docker compose up --build
18
+ ```
19
+
20
+ # Table of contents
21
+
22
+ * [Docker Compose installation instructions](#docker-compose-installation-instructions)
23
+ * [Repository with additional Docker files](#dedicated-docker-repository)
24
+
25
+ # Docker Compose installation instructions
26
+
27
+ By [@loeken](https://github.com/loeken).
28
+
29
+ - [Ubuntu 22.04](#ubuntu-2204)
30
+ - [0. youtube video](#0-youtube-video)
31
+ - [1. update the drivers](#1-update-the-drivers)
32
+ - [2. reboot](#2-reboot)
33
+ - [3. install docker](#3-install-docker)
34
+ - [4. docker \& container toolkit](#4-docker--container-toolkit)
35
+ - [5. clone the repo](#5-clone-the-repo)
36
+ - [6. prepare models](#6-prepare-models)
37
+ - [7. prepare .env file](#7-prepare-env-file)
38
+ - [8. startup docker container](#8-startup-docker-container)
39
+ - [Manjaro](#manjaro)
40
+ - [update the drivers](#update-the-drivers)
41
+ - [reboot](#reboot)
42
+ - [docker \& container toolkit](#docker--container-toolkit)
43
+ - [continue with ubuntu task](#continue-with-ubuntu-task)
44
+ - [Windows](#windows)
45
+ - [0. youtube video](#0-youtube-video-1)
46
+ - [1. choco package manager](#1-choco-package-manager)
47
+ - [2. install drivers/dependencies](#2-install-driversdependencies)
48
+ - [3. install wsl](#3-install-wsl)
49
+ - [4. reboot](#4-reboot)
50
+ - [5. git clone \&\& startup](#5-git-clone--startup)
51
+ - [6. prepare models](#6-prepare-models-1)
52
+ - [7. startup](#7-startup)
53
+ - [notes](#notes)
54
+
55
+ ## Ubuntu 22.04
56
+
57
+ ### 0. youtube video
58
+ A video walking you through the setup can be found here:
59
+
60
+ [![oobabooga text-generation-webui setup in docker on ubuntu 22.04](https://img.youtube.com/vi/ELkKWYh8qOk/0.jpg)](https://www.youtube.com/watch?v=ELkKWYh8qOk)
61
+
62
+
63
+ ### 1. update the drivers
64
+ In the “Software Updater”, update the drivers to the latest version of the proprietary driver.
65
+
66
+ ### 2. reboot
67
+ Reboot to switch to the new driver.
68
+
69
+ ### 3. install docker
70
+ ```bash
71
+ sudo apt update
72
+ sudo apt-get install curl
73
+ sudo mkdir -m 0755 -p /etc/apt/keyrings
74
+ curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
75
+ echo \
76
+ "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
77
+ "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \
78
+ sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
79
+ sudo apt update
80
+ sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin docker-compose -y
81
+ sudo usermod -aG docker $USER
82
+ newgrp docker
83
+ ```
84
+
85
+ ### 4. docker & container toolkit
86
+ ```bash
87
+ curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg
88
+ echo "deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://nvidia.github.io/libnvidia-container/stable/ubuntu22.04/amd64 /" | \
89
+ sudo tee /etc/apt/sources.list.d/nvidia.list > /dev/null
90
+ sudo apt update
91
+ sudo apt install nvidia-docker2 nvidia-container-runtime -y
92
+ sudo systemctl restart docker
93
+ ```
94
+
95
+ ### 5. clone the repo
96
+ ```
97
+ git clone https://github.com/oobabooga/text-generation-webui
98
+ cd text-generation-webui
99
+ ```
100
+
101
+ ### 6. prepare models
102
+ download and place the models inside the models folder. tested with:
103
+
104
+ 4bit
105
+ https://github.com/oobabooga/text-generation-webui/pull/530#issuecomment-1483891617
106
+ https://github.com/oobabooga/text-generation-webui/pull/530#issuecomment-1483941105
107
+
108
+ 8bit:
109
+ https://github.com/oobabooga/text-generation-webui/pull/530#issuecomment-1484235789
110
+
111
+ ### 7. prepare .env file
112
+ edit .env values to your needs.
113
+ ```bash
114
+ cp .env.example .env
115
+ nano .env
116
+ ```
117
+
118
+ ### 8. startup docker container
119
+ ```bash
120
+ docker compose up --build
121
+ ```
122
+
123
+ ## Manjaro
124
+ Manjaro/Arch is similar to Ubuntu; only the dependency installation is more convenient.
125
+
126
+ ### update the drivers
127
+ ```bash
128
+ sudo mhwd -a pci nonfree 0300
129
+ ```
130
+ ### reboot
131
+ ```bash
132
+ reboot
133
+ ```
134
+ ### docker & container toolkit
135
+ ```bash
136
+ yay -S docker docker-compose buildkit gcc nvidia-docker
137
+ sudo usermod -aG docker $USER
138
+ newgrp docker
139
+ sudo systemctl restart docker # required by nvidia-container-runtime
140
+ ```
141
+
142
+ ### continue with ubuntu task
143
+ continue at [5. clone the repo](#5-clone-the-repo)
144
+
145
+ ## Windows
146
+ ### 0. youtube video
147
+ A video walking you through the setup can be found here:
148
+ [![oobabooga text-generation-webui setup in docker on windows 11](https://img.youtube.com/vi/ejH4w5b5kFQ/0.jpg)](https://www.youtube.com/watch?v=ejH4w5b5kFQ)
149
+
150
+ ### 1. choco package manager
151
+ Install the Chocolatey package manager (https://chocolatey.org/)
152
+ ```
153
+ Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
154
+ ```
155
+
156
+ ### 2. install drivers/dependencies
157
+ ```
158
+ choco install nvidia-display-driver cuda git docker-desktop
159
+ ```
160
+
161
+ ### 3. install wsl
162
+ wsl --install
163
+
164
+ ### 4. reboot
165
+ after reboot enter username/password in wsl
166
+
167
+ ### 5. git clone && startup
168
+ clone the repo and edit .env values to your needs.
169
+ ```
170
+ cd Desktop
171
+ git clone https://github.com/oobabooga/text-generation-webui
172
+ cd text-generation-webui
173
+ COPY .env.example .env
174
+ notepad .env
175
+ ```
176
+
177
+ ### 6. prepare models
178
+ download and place the models inside the models folder. tested with:
179
+
180
+ 4bit https://github.com/oobabooga/text-generation-webui/pull/530#issuecomment-1483891617 https://github.com/oobabooga/text-generation-webui/pull/530#issuecomment-1483941105
181
+
182
+ 8bit: https://github.com/oobabooga/text-generation-webui/pull/530#issuecomment-1484235789
183
+
184
+ ### 7. startup
185
+ ```
186
+ docker compose up
187
+ ```
188
+
189
+ ## notes
190
+
191
+ On older Ubuntu versions you can manually install the docker compose plugin like this:
192
+ ```
193
+ DOCKER_CONFIG=${DOCKER_CONFIG:-$HOME/.docker}
194
+ mkdir -p $DOCKER_CONFIG/cli-plugins
195
+ curl -SL https://github.com/docker/compose/releases/download/v2.17.2/docker-compose-linux-x86_64 -o $DOCKER_CONFIG/cli-plugins/docker-compose
196
+ chmod +x $DOCKER_CONFIG/cli-plugins/docker-compose
197
+ export PATH="$HOME/.docker/cli-plugins:$PATH"
198
+ ```
199
+
200
+ # Dedicated docker repository
201
+
202
+ An external repository maintains a docker wrapper for this project as well as several pre-configured 'one-click' `docker compose` variants (e.g., updated branches of GPTQ). It can be found at: [Atinoda/text-generation-webui-docker](https://github.com/Atinoda/text-generation-webui-docker).
203
+
docs/ExLlama.md ADDED
@@ -0,0 +1,22 @@
1
+ # ExLlama
2
+
3
+ ### About
4
+
5
+ ExLlama is an extremely optimized GPTQ backend for LLaMA models. It features much lower VRAM usage and much higher speeds due to not relying on unoptimized transformers code.
6
+
7
+ ### Usage
8
+
9
+ Configure text-generation-webui to use exllama via the UI or command line:
10
+ - In the "Model" tab, set "Loader" to "exllama"
11
+ - Specify `--loader exllama` on the command line
12
+
13
+ ### Manual setup
14
+
15
+ No additional installation steps are necessary since an exllama package is already included in the requirements.txt. If this package fails to install for some reason, you can install it manually by cloning the original repository into your `repositories/` folder:
16
+
17
+ ```
18
+ mkdir repositories
19
+ cd repositories
20
+ git clone https://github.com/turboderp/exllama
21
+ ```
22
+
docs/Extensions.md ADDED
@@ -0,0 +1,244 @@
1
+ # Extensions
2
+
3
+ Extensions are defined by files named `script.py` inside subfolders of `text-generation-webui/extensions`. They are loaded at startup if the folder name is specified after the `--extensions` flag.
4
+
5
+ For instance, `extensions/silero_tts/script.py` gets loaded with `python server.py --extensions silero_tts`.
6
+
7
+ ## [text-generation-webui-extensions](https://github.com/oobabooga/text-generation-webui-extensions)
8
+
9
+ The repository above contains a directory of user extensions.
10
+
11
+ If you create an extension, you are welcome to host it in a GitHub repository and submit a PR adding it to the list.
12
+
13
+ ## Built-in extensions
14
+
15
+ |Extension|Description|
16
+ |---------|-----------|
17
+ |[api](https://github.com/oobabooga/text-generation-webui/tree/main/extensions/api)| Creates an API with two endpoints, one for streaming at `/api/v1/stream` port 5005 and another for blocking at `/api/v1/generate` port 5000. This is the main API for the webui. |
18
+ |[openai](https://github.com/oobabooga/text-generation-webui/tree/main/extensions/openai)| Creates an API that mimics the OpenAI API and can be used as a drop-in replacement. |
19
+ |[multimodal](https://github.com/oobabooga/text-generation-webui/tree/main/extensions/multimodal) | Adds multimodality support (text+images). For a detailed description see [README.md](https://github.com/oobabooga/text-generation-webui/tree/main/extensions/multimodal/README.md) in the extension directory. |
20
+ |[google_translate](https://github.com/oobabooga/text-generation-webui/tree/main/extensions/google_translate)| Automatically translates inputs and outputs using Google Translate.|
21
+ |[silero_tts](https://github.com/oobabooga/text-generation-webui/tree/main/extensions/silero_tts)| Text-to-speech extension using [Silero](https://github.com/snakers4/silero-models). When used in chat mode, responses are replaced with an audio widget. |
22
+ |[elevenlabs_tts](https://github.com/oobabooga/text-generation-webui/tree/main/extensions/elevenlabs_tts)| Text-to-speech extension using the [ElevenLabs](https://beta.elevenlabs.io/) API. You need an API key to use it. |
23
+ |[whisper_stt](https://github.com/oobabooga/text-generation-webui/tree/main/extensions/whisper_stt)| Allows you to enter your inputs in chat mode using your microphone. |
24
+ |[sd_api_pictures](https://github.com/oobabooga/text-generation-webui/tree/main/extensions/sd_api_pictures)| Allows you to request pictures from the bot in chat mode, which will be generated using the AUTOMATIC1111 Stable Diffusion API. See examples [here](https://github.com/oobabooga/text-generation-webui/pull/309). |
25
+ |[character_bias](https://github.com/oobabooga/text-generation-webui/tree/main/extensions/character_bias)| Just a very simple example that adds a hidden string at the beginning of the bot's reply in chat mode. |
26
+ |[send_pictures](https://github.com/oobabooga/text-generation-webui/blob/main/extensions/send_pictures/)| Creates an image upload field that can be used to send images to the bot in chat mode. Captions are automatically generated using BLIP. |
27
+ |[gallery](https://github.com/oobabooga/text-generation-webui/blob/main/extensions/gallery/)| Creates a gallery with the chat characters and their pictures. |
28
+ |[superbooga](https://github.com/oobabooga/text-generation-webui/tree/main/extensions/superbooga)| An extension that uses ChromaDB to create an arbitrarily large pseudocontext, taking as input text files, URLs, or pasted text. Based on https://github.com/kaiokendev/superbig. |
29
+ |[ngrok](https://github.com/oobabooga/text-generation-webui/tree/main/extensions/ngrok)| Allows you to access the web UI remotely using the ngrok reverse tunnel service (free). It's an alternative to the built-in Gradio `--share` feature. |
30
+ |[perplexity_colors](https://github.com/oobabooga/text-generation-webui/tree/main/extensions/perplexity_colors)| Colors each token in the output text by its associated probability, as derived from the model logits. |
31
+
32
+ ## How to write an extension
33
+
34
+ The extensions framework is based on special functions and variables that you can define in `script.py`. The functions are the following:
35
+
36
+ | Function | Description |
37
+ |-------------|-------------|
38
+ | `def setup()` | Is executed when the extension gets imported. |
39
+ | `def ui()` | Creates custom gradio elements when the UI is launched. |
40
+ | `def custom_css()` | Returns custom CSS as a string. It is applied whenever the web UI is loaded. |
41
+ | `def custom_js()` | Same as above but for javascript. |
42
+ | `def input_modifier(string, state, is_chat=False)` | Modifies the input string before it enters the model. In chat mode, it is applied to the user message. Otherwise, it is applied to the entire prompt. |
43
+ | `def output_modifier(string, state, is_chat=False)` | Modifies the output string before it is presented in the UI. In chat mode, it is applied to the bot's reply. Otherwise, it is applied to the entire output. |
44
+ | `def chat_input_modifier(text, visible_text, state)` | Modifies both the visible and internal inputs in chat mode. Can be used to hijack the chat input with custom content. |
45
+ | `def bot_prefix_modifier(string, state)` | Applied in chat mode to the prefix for the bot's reply. |
46
+ | `def state_modifier(state)` | Modifies the dictionary containing the UI input parameters before it is used by the text generation functions. |
47
+ | `def history_modifier(history)` | Modifies the chat history before the text generation in chat mode begins. |
48
+ | `def custom_generate_reply(...)` | Overrides the main text generation function. |
49
+ | `def custom_generate_chat_prompt(...)` | Overrides the prompt generator in chat mode. |
50
+ | `def tokenizer_modifier(state, prompt, input_ids, input_embeds)` | Modifies the `input_ids`/`input_embeds` fed to the model. Should return `prompt`, `input_ids`, `input_embeds`. See the `multimodal` extension for an example. |
51
+ | `def custom_tokenized_length(prompt)` | Used in conjunction with `tokenizer_modifier`, returns the length in tokens of `prompt`. See the `multimodal` extension for an example. |
52
+
53
+ Additionally, you can define a special `params` dictionary. In it, the `display_name` key is used to define the displayed name of the extension in the UI, and the `is_tab` key is used to define whether the extension should appear in a new tab. By default, extensions appear at the bottom of the "Text generation" tab.
54
+
55
+ Example:
56
+
57
+ ```python
58
+ params = {
59
+ "display_name": "Google Translate",
60
+ "is_tab": True,
61
+ }
62
+ ```
63
+
64
+ The `params` dict may also contain variables that you want to be customizable through a `settings.yaml` file. For instance, assuming the extension is in `extensions/google_translate`, the variable `language string` in
65
+
66
+ ```python
67
+ params = {
68
+ "display_name": "Google Translate",
69
+ "is_tab": True,
70
+ "language string": "jp"
71
+ }
72
+ ```
73
+
74
+ can be customized by adding a key called `google_translate-language string` to `settings.yaml`:
75
+
76
+ ```yaml
77
+ google_translate-language string: 'fr'
78
+ ```
79
+
80
+ That is, the syntax for the key is `extension_name-variable_name`.
81
+
82
+ ## Using multiple extensions at the same time
83
+
84
+ You can activate more than one extension at a time by providing their names separated by spaces after `--extensions`. The input, output, and bot prefix modifiers will be applied in the specified order.
85
+
86
+ Example:
87
+
88
+ ```
89
+ python server.py --extensions enthusiasm translate # First apply enthusiasm, then translate
90
+ python server.py --extensions translate enthusiasm # First apply translate, then enthusiasm
91
+ ```
92
+
93
+ Do note that for:
94
+ - `custom_generate_chat_prompt`
95
+ - `custom_generate_reply`
96
+ - `custom_tokenized_length`
97
+
98
+ only the first declaration encountered will be used and the rest will be ignored.
99
+
100
+ ## A full example
101
+
102
+ The source code below can be found at [extensions/example/script.py](https://github.com/oobabooga/text-generation-webui/tree/main/extensions/example/script.py).
103
+
104
+ ```python
105
+ """
106
+ An example of extension. It does nothing, but you can add transformations
107
+ before the return statements to customize the webui behavior.
108
+
109
+ Starting from history_modifier and ending in output_modifier, the
110
+ functions are declared in the same order that they are called at
111
+ generation time.
112
+ """
113
+
114
+ import gradio as gr
115
+ import torch
116
+ from transformers import LogitsProcessor
117
+
118
+ from modules import chat, shared
119
+ from modules.text_generation import (
120
+ decode,
121
+ encode,
122
+ generate_reply,
123
+ )
124
+
125
+ params = {
126
+ "display_name": "Example Extension",
127
+ "is_tab": False,
128
+ }
129
+
130
+ class MyLogits(LogitsProcessor):
131
+ """
132
+ Manipulates the probabilities for the next token before it gets sampled.
133
+ Used in the logits_processor_modifier function below.
134
+ """
135
+ def __init__(self):
136
+ pass
137
+
138
+ def __call__(self, input_ids, scores):
139
+ # probs = torch.softmax(scores, dim=-1, dtype=torch.float)
140
+ # probs[0] /= probs[0].sum()
141
+ # scores = torch.log(probs / (1 - probs))
142
+ return scores
143
+
144
+ def history_modifier(history):
145
+ """
146
+ Modifies the chat history.
147
+ Only used in chat mode.
148
+ """
149
+ return history
150
+
151
+ def state_modifier(state):
152
+ """
153
+ Modifies the state variable, which is a dictionary containing the input
154
+ values in the UI like sliders and checkboxes.
155
+ """
156
+ return state
157
+
158
+ def chat_input_modifier(text, visible_text, state):
159
+ """
160
+ Modifies the user input string in chat mode (visible_text).
161
+ You can also modify the internal representation of the user
162
+ input (text) to change how it will appear in the prompt.
163
+ """
164
+ return text, visible_text
165
+
166
+ def input_modifier(string, state, is_chat=False):
167
+ """
168
+ In default/notebook modes, modifies the whole prompt.
169
+
170
+ In chat mode, it is the same as chat_input_modifier but only applied
171
+ to "text", here called "string", and not to "visible_text".
172
+ """
173
+ return string
174
+
175
+ def bot_prefix_modifier(string, state):
176
+ """
177
+ Modifies the prefix for the next bot reply in chat mode.
178
+ By default, the prefix will be something like "Bot Name:".
179
+ """
180
+ return string
181
+
182
+ def tokenizer_modifier(state, prompt, input_ids, input_embeds):
183
+ """
184
+ Modifies the input ids and embeds.
185
+ Used by the multimodal extension to put image embeddings in the prompt.
186
+ Only used by loaders that use the transformers library for sampling.
187
+ """
188
+ return prompt, input_ids, input_embeds
189
+
190
+ def logits_processor_modifier(processor_list, input_ids):
191
+ """
192
+ Adds logits processors to the list, allowing you to access and modify
193
+ the next token probabilities.
194
+ Only used by loaders that use the transformers library for sampling.
195
+ """
196
+ processor_list.append(MyLogits())
197
+ return processor_list
198
+
199
+ def output_modifier(string, state, is_chat=False):
200
+ """
201
+ Modifies the LLM output before it gets presented.
202
+
203
+ In chat mode, the modified version goes into history['visible'],
204
+ and the original version goes into history['internal'].
205
+ """
206
+ return string
207
+
208
+ def custom_generate_chat_prompt(user_input, state, **kwargs):
209
+ """
210
+ Replaces the function that generates the prompt from the chat history.
211
+ Only used in chat mode.
212
+ """
213
+ result = chat.generate_chat_prompt(user_input, state, **kwargs)
214
+ return result
215
+
216
+ def custom_css():
217
+ """
218
+ Returns a CSS string that gets appended to the CSS for the webui.
219
+ """
220
+ return ''
221
+
222
+ def custom_js():
223
+ """
224
+ Returns a javascript string that gets appended to the javascript
225
+ for the webui.
226
+ """
227
+ return ''
228
+
229
+ def setup():
230
+ """
231
+ Gets executed only once, when the extension is imported.
232
+ """
233
+ pass
234
+
235
+ def ui():
236
+ """
237
+ Gets executed when the UI is drawn. Custom gradio elements and
238
+ their corresponding event handlers should be defined here.
239
+
240
+ To learn about gradio components, check out the docs:
241
+ https://gradio.app/docs/
242
+ """
243
+ pass
244
+ ```
docs/GPTQ-models-(4-bit-mode).md ADDED
@@ -0,0 +1,187 @@
 
 
1
+ GPTQ is a clever quantization algorithm that lightly reoptimizes the weights during quantization so that the accuracy loss is compensated relative to a round-to-nearest quantization. See the paper for more details: https://arxiv.org/abs/2210.17323
2
+
3
+ 4-bit GPTQ models reduce VRAM usage by about 75%. So LLaMA-7B fits into a 6GB GPU, and LLaMA-30B fits into a 24GB GPU.
4
+
5
+ ## Overview
6
+
7
+ There are two ways of loading GPTQ models in the web UI at the moment:
8
+
9
+ * Using AutoGPTQ:
10
+ * supports more models
11
+ * standardized (no need to guess any parameter)
12
+ * is a proper Python library
13
+ * ~no wheels are presently available so it requires manual compilation~
14
+ * supports loading both triton and cuda models
15
+
16
+ * Using GPTQ-for-LLaMa directly:
17
+ * faster CPU offloading
18
+ * faster multi-GPU inference
19
+ * supports loading LoRAs using a monkey patch
20
+ * requires you to manually figure out the wbits/groupsize/model_type parameters for the model to be able to load it
21
+ * supports either only cuda or only triton depending on the branch
22
+
23
+ For creating new quantizations, I recommend using AutoGPTQ: https://github.com/PanQiWei/AutoGPTQ
24
+
25
+ ## AutoGPTQ
26
+
27
+ ### Installation
28
+
29
+ No additional steps are necessary as AutoGPTQ is already in the `requirements.txt` for the webui. If you still want or need to install it manually for whatever reason, these are the commands:
30
+
31
+ ```
32
+ conda activate textgen
33
+ git clone https://github.com/PanQiWei/AutoGPTQ.git && cd AutoGPTQ
34
+ pip install .
35
+ ```
36
+
37
+ The last command requires `nvcc` to be installed (see the [instructions above](https://github.com/oobabooga/text-generation-webui/blob/main/docs/GPTQ-models-(4-bit-mode).md#step-1-install-nvcc)).
38
+
39
+ ### Usage
40
+
41
+ When you quantize a model using AutoGPTQ, a folder containing a file called `quantize_config.json` will be generated. Place that folder inside your `models/` folder and load it with the `--autogptq` flag:
42
+
43
+ ```
44
+ python server.py --autogptq --model model_name
45
+ ```
46
+
47
+ Alternatively, check the `autogptq` box in the "Model" tab of the UI before loading the model.
48
+
49
+ ### Offloading
50
+
51
+ In order to do CPU offloading or multi-gpu inference with AutoGPTQ, use the `--gpu-memory` flag. It is currently somewhat slower than offloading with the `--pre_layer` option in GPTQ-for-LLaMA.
52
+
53
+ For CPU offloading:
54
+
55
+ ```
56
+ python server.py --autogptq --gpu-memory 3000MiB --model model_name
57
+ ```
58
+
59
+ For multi-GPU inference:
60
+
61
+ ```
62
+ python server.py --autogptq --gpu-memory 3000MiB 6000MiB --model model_name
63
+ ```
64
+
65
+ ### Using LoRAs with AutoGPTQ
66
+
67
+ Works fine for a single LoRA.
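+
+ As a rough sketch, the `--autogptq` flag shown above can be combined with the `--lora` flag described in [LoRA.md](LoRA.md) (the model and LoRA names below are placeholders):
+
+ ```
+ python server.py --autogptq --model model_name --lora lora_name
+ ```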
68
+
69
+ ## GPTQ-for-LLaMa
70
+
71
+ GPTQ-for-LLaMa is the original adaptation of GPTQ for the LLaMA model. It was made possible by [@qwopqwop200](https://github.com/qwopqwop200/GPTQ-for-LLaMa): https://github.com/qwopqwop200/GPTQ-for-LLaMa
72
+
73
+ A Python package containing both major CUDA versions of GPTQ-for-LLaMa is used to simplify installation and compatibility: https://github.com/jllllll/GPTQ-for-LLaMa-CUDA
74
+
75
+ ### Precompiled wheels
76
+
77
+ Kindly provided by our friend jllllll: https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases
78
+
79
+ Wheels are included in requirements.txt and are installed with the webui on supported systems.
80
+
81
+ ### Manual installation
82
+
83
+ #### Step 1: install nvcc
84
+
85
+ ```
86
+ conda activate textgen
87
+ conda install cuda -c nvidia/label/cuda-11.7.1
88
+ ```
89
+
90
+ The command above takes some 10 minutes to run and shows no progress bar or updates along the way.
91
+
92
+ You are also going to need to have a C++ compiler installed. On Linux, `sudo apt install build-essential` or equivalent is enough. On Windows, Visual Studio or Visual Studio Build Tools is required.
93
+
94
+ If you're using an older version of CUDA toolkit (e.g. 11.7) but the latest version of `gcc` and `g++` (12.0+) on Linux, you should downgrade with: `conda install -c conda-forge gxx==11.3.0`. Kernel compilation will fail otherwise.
95
+
96
+ #### Step 2: compile the CUDA extensions
97
+
98
+ ```
99
+ python -m pip install git+https://github.com/jllllll/GPTQ-for-LLaMa-CUDA -v
100
+ ```
101
+
102
+ ### Getting pre-converted LLaMA weights
103
+
104
+ * Direct download (recommended):
105
+
106
+ https://huggingface.co/Neko-Institute-of-Science/LLaMA-7B-4bit-128g
107
+
108
+ https://huggingface.co/Neko-Institute-of-Science/LLaMA-13B-4bit-128g
109
+
110
+ https://huggingface.co/Neko-Institute-of-Science/LLaMA-30B-4bit-128g
111
+
112
+ https://huggingface.co/Neko-Institute-of-Science/LLaMA-65B-4bit-128g
113
+
114
+ These models were converted with `desc_act=True`. They work just fine with ExLlama. For AutoGPTQ, they will only work on Linux with the `triton` option checked.
115
+
116
+ * Torrent:
117
+
118
+ https://github.com/oobabooga/text-generation-webui/pull/530#issuecomment-1483891617
119
+
120
+ https://github.com/oobabooga/text-generation-webui/pull/530#issuecomment-1483941105
121
+
122
+ These models were converted with `desc_act=False`. As such, they are less accurate, but they work with AutoGPTQ on Windows. The `128g` versions are better from 13b upwards, and worse for 7b. The tokenizer files in the torrents are outdated, in particular the files called `tokenizer_config.json` and `special_tokens_map.json`. Here you can find those files: https://huggingface.co/oobabooga/llama-tokenizer
123
+
124
+ ### Starting the web UI:
125
+
126
+ Use the `--gptq-for-llama` flag.
127
+
128
+ For the models converted without `group-size`:
129
+
130
+ ```
131
+ python server.py --model llama-7b-4bit --gptq-for-llama
132
+ ```
133
+
134
+ For the models converted with `group-size`:
135
+
136
+ ```
137
+ python server.py --model llama-13b-4bit-128g --gptq-for-llama --wbits 4 --groupsize 128
138
+ ```
139
+
140
+ The command-line flags `--wbits` and `--groupsize` are automatically detected based on the folder names in many cases.
141
+
142
+ ### CPU offloading
143
+
144
+ It is possible to offload part of the layers of the 4-bit model to the CPU with the `--pre_layer` flag. The higher the number after `--pre_layer`, the more layers will be allocated to the GPU.
145
+
146
+ With this command, I can run llama-7b with 4GB VRAM:
147
+
148
+ ```
149
+ python server.py --model llama-7b-4bit --pre_layer 20
150
+ ```
151
+
152
+ This is the performance:
153
+
154
+ ```
155
+ Output generated in 123.79 seconds (1.61 tokens/s, 199 tokens)
156
+ ```
157
+
158
+ You can also use multiple GPUs with `pre_layer` if using the oobabooga fork of GPTQ, eg `--pre_layer 30 60` will load a LLaMA-30B model half onto your first GPU and half onto your second, or `--pre_layer 20 40` will load 20 layers onto GPU-0, 20 layers onto GPU-1, and 20 layers offloaded to CPU.
159
+
160
+ ### Using LoRAs with GPTQ-for-LLaMa
161
+
162
+ This requires using a monkey patch that is supported by this web UI: https://github.com/johnsmith0031/alpaca_lora_4bit
163
+
164
+ To use it:
165
+
166
+ 1. Clone `johnsmith0031/alpaca_lora_4bit` into the repositories folder:
167
+
168
+ ```
169
+ cd text-generation-webui/repositories
170
+ git clone https://github.com/johnsmith0031/alpaca_lora_4bit
171
+ ```
172
+
173
+ ⚠️ I have tested it with the following commit specifically: `2f704b93c961bf202937b10aac9322b092afdce0`
174
+
175
+ 2. Install https://github.com/sterlind/GPTQ-for-LLaMa with this command:
176
+
177
+ ```
178
+ pip install git+https://github.com/sterlind/GPTQ-for-LLaMa.git@lora_4bit
179
+ ```
180
+
181
+ 3. Start the UI with the `--monkey-patch` flag:
182
+
183
+ ```
184
+ python server.py --model llama-7b-4bit-128g --listen --lora tloen_alpaca-lora-7b --monkey-patch
185
+ ```
186
+
187
+
docs/LLaMA-model.md ADDED
@@ -0,0 +1,56 @@
 
 
1
+ LLaMA is a Large Language Model developed by Meta AI.
2
+
3
+ It was trained on more tokens than previous models. The result is that the smallest version with 7 billion parameters has similar performance to GPT-3 with 175 billion parameters.
4
+
5
+ This guide will cover usage through the official `transformers` implementation. For 4-bit mode, head over to [GPTQ models (4 bit mode)
6
+ ](GPTQ-models-(4-bit-mode).md).
7
+
8
+ ## Getting the weights
9
+
10
+ ### Option 1: pre-converted weights
11
+
12
+ * Direct download (recommended):
13
+
14
+ https://huggingface.co/Neko-Institute-of-Science/LLaMA-7B-HF
15
+
16
+ https://huggingface.co/Neko-Institute-of-Science/LLaMA-13B-HF
17
+
18
+ https://huggingface.co/Neko-Institute-of-Science/LLaMA-30B-HF
19
+
20
+ https://huggingface.co/Neko-Institute-of-Science/LLaMA-65B-HF
21
+
22
+ * Torrent:
23
+
24
+ https://github.com/oobabooga/text-generation-webui/pull/530#issuecomment-1484235789
25
+
26
+ The tokenizer files in the torrent above are outdated, in particular the files called `tokenizer_config.json` and `special_tokens_map.json`. Here you can find those files: https://huggingface.co/oobabooga/llama-tokenizer
27
+
28
+ ### Option 2: convert the weights yourself
29
+
30
+ 1. Install the `protobuf` library:
31
+
32
+ ```
33
+ pip install protobuf==3.20.1
34
+ ```
35
+
36
+ 2. Use the script below to convert the model in `.pth` format that you, a fellow academic, downloaded using Meta's official link.
37
+
38
+ If you have `transformers` installed in place:
39
+
40
+ ```
41
+ python -m transformers.models.llama.convert_llama_weights_to_hf --input_dir /path/to/LLaMA --model_size 7B --output_dir /tmp/outputs/llama-7b
42
+ ```
43
+
44
+ Otherwise download [convert_llama_weights_to_hf.py](https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/convert_llama_weights_to_hf.py) first and run:
45
+
46
+ ```
47
+ python convert_llama_weights_to_hf.py --input_dir /path/to/LLaMA --model_size 7B --output_dir /tmp/outputs/llama-7b
48
+ ```
49
+
50
+ 3. Move the `llama-7b` folder inside your `text-generation-webui/models` folder.
51
+
52
+ ## Starting the web UI
53
+
54
+ ```
55
+ python server.py --model llama-7b
56
+ ```
docs/LLaMA-v2-model.md ADDED
@@ -0,0 +1,35 @@
 
 
1
+ # LLaMA-v2
2
+
3
+ To convert LLaMA-v2 from the `.pth` format provided by Meta to transformers format, follow the steps below:
4
+
5
+ 1) `cd` into your `llama` folder (the one containing `download.sh` and the models that you downloaded):
6
+
7
+ ```
8
+ cd llama
9
+ ```
10
+
11
+ 2) Clone the transformers library:
12
+
13
+ ```
14
+ git clone 'https://github.com/huggingface/transformers'
15
+
16
+ ```
17
+
18
+ 3) Create symbolic links from the downloaded folders to names that the conversion script can recognize:
19
+
20
+ ```
21
+ ln -s llama-2-7b 7B
22
+ ln -s llama-2-13b 13B
23
+ ```
24
+
25
+ 4) Do the conversions:
26
+
27
+ ```
28
+ mkdir llama-2-7b-hf llama-2-13b-hf
29
+ python ./transformers/src/transformers/models/llama/convert_llama_weights_to_hf.py --input_dir . --model_size 7B --output_dir llama-2-7b-hf --safe_serialization true
30
+ python ./transformers/src/transformers/models/llama/convert_llama_weights_to_hf.py --input_dir . --model_size 13B --output_dir llama-2-13b-hf --safe_serialization true
31
+ ```
32
+
33
+ 5) Move the output folders inside `text-generation-webui/models`
34
+
35
+ 6) Have fun
docs/LoRA.md ADDED
@@ -0,0 +1,71 @@
 
 
1
+ # LoRA
2
+
3
+ LoRA (Low-Rank Adaptation) is an extremely powerful method for customizing a base model by training only a small number of parameters. The resulting LoRAs can be attached to models at runtime.
4
+
5
+ For instance, a 50mb LoRA can teach LLaMA an entire new language, a given writing style, or give it instruction-following or chat abilities.
6
+
7
+ This is the current state of LoRA integration in the web UI:
8
+
9
+ |Loader | Status |
10
+ |--------|------|
11
+ | Transformers | Full support in 16-bit, `--load-in-8bit`, `--load-in-4bit`, and CPU modes. |
12
+ | ExLlama | Single LoRA support. Fast to remove the LoRA afterwards. |
13
+ | AutoGPTQ | Single LoRA support. Removing the LoRA requires reloading the entire model.|
14
+ | GPTQ-for-LLaMa | Full support with the [monkey patch](https://github.com/oobabooga/text-generation-webui/blob/main/docs/GPTQ-models-(4-bit-mode).md#using-loras-with-gptq-for-llama). |
15
+
16
+ ## Downloading a LoRA
17
+
18
+ The download script can be used. For instance:
19
+
20
+ ```
21
+ python download-model.py tloen/alpaca-lora-7b
22
+ ```
23
+
24
+ The files will be saved to `loras/tloen_alpaca-lora-7b`.
25
+
26
+ ## Using the LoRA
27
+
28
+ The `--lora` command-line flag can be used. Examples:
29
+
30
+ ```
31
+ python server.py --model llama-7b-hf --lora tloen_alpaca-lora-7b
32
+ python server.py --model llama-7b-hf --lora tloen_alpaca-lora-7b --load-in-8bit
33
+ python server.py --model llama-7b-hf --lora tloen_alpaca-lora-7b --load-in-4bit
34
+ python server.py --model llama-7b-hf --lora tloen_alpaca-lora-7b --cpu
35
+ ```
36
+
37
+ Instead of using the `--lora` command-line flag, you can also select the LoRA in the "Parameters" tab of the interface.
38
+
39
+ ## Prompt
40
+ For the Alpaca LoRA in particular, the prompt must be formatted like this:
41
+
42
+ ```
43
+ Below is an instruction that describes a task. Write a response that appropriately completes the request.
44
+ ### Instruction:
45
+ Write a Python script that generates text using the transformers library.
46
+ ### Response:
47
+ ```
48
+
49
+ Sample output:
50
+
51
+ ```
52
+ Below is an instruction that describes a task. Write a response that appropriately completes the request.
53
+ ### Instruction:
54
+ Write a Python script that generates text using the transformers library.
55
+ ### Response:
56
+
57
+ import transformers
58
+ from transformers import AutoTokenizer, AutoModelForCausalLM
59
+ tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
60
+ model = AutoModelForCausalLM.from_pretrained("bert-base-uncased")
61
+ texts = ["Hello world", "How are you"]
62
+ for sentence in texts:
63
+ sentence = tokenizer(sentence)
64
+ print(f"Generated {len(sentence)} tokens from '{sentence}'")
65
+ output = model(sentences=sentence).predict()
66
+ print(f"Predicted {len(output)} tokens for '{sentence}':\n{output}")
67
+ ```
68
+
69
+ ## Training a LoRA
70
+
71
+ You can train your own LoRAs from the `Training` tab. See [Training LoRAs](Training-LoRAs.md) for details.
docs/Low-VRAM-guide.md ADDED
@@ -0,0 +1,53 @@
 
 
1
+ If your GPU is not large enough to fit a 16-bit model, try these in the following order:
2
+
3
+ ### Load the model in 8-bit mode
4
+
5
+ ```
6
+ python server.py --load-in-8bit
7
+ ```
8
+
9
+ ### Load the model in 4-bit mode
10
+
11
+ ```
12
+ python server.py --load-in-4bit
13
+ ```
14
+
15
+ ### Split the model across your GPU and CPU
16
+
17
+ ```
18
+ python server.py --auto-devices
19
+ ```
20
+
21
+ If you can load the model with this command but it runs out of memory when you try to generate text, try increasingly limiting the amount of memory allocated to the GPU until the error stops happening:
22
+
23
+ ```
24
+ python server.py --auto-devices --gpu-memory 10
25
+ python server.py --auto-devices --gpu-memory 9
26
+ python server.py --auto-devices --gpu-memory 8
27
+ ...
28
+ ```
29
+
30
+ where the number is in GiB.
31
+
32
+ For finer control, you can also specify the unit in MiB explicitly:
33
+
34
+ ```
35
+ python server.py --auto-devices --gpu-memory 8722MiB
36
+ python server.py --auto-devices --gpu-memory 4725MiB
37
+ python server.py --auto-devices --gpu-memory 3500MiB
38
+ ...
39
+ ```
40
+
41
+ ### Send layers to a disk cache
42
+
43
+ As a desperate last measure, you can split the model across your GPU, CPU, and disk:
44
+
45
+ ```
46
+ python server.py --auto-devices --disk
47
+ ```
48
+
49
+ With this, I am able to load a 30b model into my RTX 3090, but it takes 10 seconds to generate 1 word.
50
+
51
+ ### DeepSpeed (experimental)
52
+
53
+ An experimental alternative to all of the above is to use DeepSpeed: [guide](DeepSpeed.md).
docs/README.md ADDED
@@ -0,0 +1,21 @@
 
 
1
+ # text-generation-webui documentation
2
+
3
+ ## Table of contents
4
+
5
+ * [Audio Notification](Audio-Notification.md)
6
+ * [Chat mode](Chat-mode.md)
7
+ * [DeepSpeed](DeepSpeed.md)
8
+ * [Docker](Docker.md)
9
+ * [ExLlama](ExLlama.md)
10
+ * [Extensions](Extensions.md)
11
+ * [GPTQ models (4 bit mode)](GPTQ-models-(4-bit-mode).md)
12
+ * [LLaMA model](LLaMA-model.md)
13
+ * [llama.cpp](llama.cpp.md)
14
+ * [LoRA](LoRA.md)
15
+ * [Low VRAM guide](Low-VRAM-guide.md)
16
+ * [RWKV model](RWKV-model.md)
17
+ * [Spell book](Spell-book.md)
18
+ * [System requirements](System-requirements.md)
19
+ * [Training LoRAs](Training-LoRAs.md)
20
+ * [Windows installation guide](Windows-installation-guide.md)
21
+ * [WSL installation guide](WSL-installation-guide.md)
docs/RWKV-model.md ADDED
@@ -0,0 +1,72 @@
 
 
1
+ > RWKV: RNN with Transformer-level LLM Performance
2
+ >
3
+ > It combines the best of RNN and transformer - great performance, fast inference, saves VRAM, fast training, "infinite" ctx_len, and free sentence embedding (using the final hidden state).
4
+
5
+ https://github.com/BlinkDL/RWKV-LM
6
+
7
+ https://github.com/BlinkDL/ChatRWKV
8
+
9
+ ## Using RWKV in the web UI
10
+
11
+ ### Hugging Face weights
12
+
13
+ Simply download the weights from https://huggingface.co/RWKV and load them as you would for any other model.
14
+
15
+ There is a bug in transformers==4.29.2 that prevents RWKV from being loaded in 8-bit mode. You can install the dev branch to solve this bug: `pip install git+https://github.com/huggingface/transformers`
16
+
17
+ ### Original .pth weights
18
+
19
+ The instructions below are from before RWKV was supported in transformers, and they are kept for legacy purposes. The old implementation is possibly faster, but it lacks the full range of samplers that the transformers library offers.
20
+
21
+ #### 0. Install the RWKV library
22
+
23
+ ```
24
+ pip install rwkv
25
+ ```
26
+
27
+ `0.7.3` was the last version that I tested. If you experience any issues, try ```pip install rwkv==0.7.3```.
28
+
29
+ #### 1. Download the model
30
+
31
+ It is available in different sizes:
32
+
33
+ * https://huggingface.co/BlinkDL/rwkv-4-pile-3b/
34
+ * https://huggingface.co/BlinkDL/rwkv-4-pile-7b/
35
+ * https://huggingface.co/BlinkDL/rwkv-4-pile-14b/
36
+
37
+ There are also older releases with smaller sizes like:
38
+
39
+ * https://huggingface.co/BlinkDL/rwkv-4-pile-169m/resolve/main/RWKV-4-Pile-169M-20220807-8023.pth
40
+
41
+ Download the chosen `.pth` and put it directly in the `models` folder.
42
+
43
+ #### 2. Download the tokenizer
44
+
45
+ [20B_tokenizer.json](https://raw.githubusercontent.com/BlinkDL/ChatRWKV/main/v2/20B_tokenizer.json)
46
+
47
+ Also put it directly in the `models` folder. Make sure to not rename it. It should be called `20B_tokenizer.json`.
48
+
49
+ #### 3. Launch the web UI
50
+
51
+ No additional steps are required. Just launch it as you would with any other model.
52
+
53
+ ```
54
+ python server.py --listen --no-stream --model RWKV-4-Pile-169M-20220807-8023.pth
55
+ ```
56
+
57
+ #### Setting a custom strategy
58
+
59
+ It is possible to have very fine control over the offloading and precision for the model with the `--rwkv-strategy` flag. Possible values include:
60
+
61
+ ```
62
+ "cpu fp32" # CPU mode
63
+ "cuda fp16" # GPU mode with float16 precision
64
+ "cuda fp16 *30 -> cpu fp32" # GPU+CPU offloading. The higher the number after *, the higher the GPU allocation.
65
+ "cuda fp16i8" # GPU mode with 8-bit precision
66
+ ```
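+
+ For example, a launch with 8-bit GPU precision might look like this (reusing the example model from above):
+
+ ```
+ python server.py --model RWKV-4-Pile-169M-20220807-8023.pth --rwkv-strategy "cuda fp16i8"
+ ```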
67
+
68
+ See the README for the PyPI package for more details: https://pypi.org/project/rwkv/
69
+
70
+ #### Compiling the CUDA kernel
71
+
72
+ You can compile the CUDA kernel for the model with `--rwkv-cuda-on`. This should improve the performance a lot but I haven't been able to get it to work yet.
docs/Spell-book.md ADDED
@@ -0,0 +1,107 @@
 
 
1
+ You have now entered a hidden corner of the internet.
2
+
3
+ A confusing yet intriguing realm of paradoxes and contradictions.
4
+
5
+ A place where you will find out that what you thought you knew, you in fact didn't know, and what you didn't know was in front of you all along.
6
+
7
+ ![](https://i.pinimg.com/originals/6e/e2/7b/6ee27bad351d3aca470d80f1033ba9c6.jpg)
8
+
9
+ *In other words, here I will document little-known facts about this web UI that I could not find another place for in the wiki.*
10
+
11
+ #### You can train LoRAs in CPU mode
12
+
13
+ Load the web UI with
14
+
15
+ ```
16
+ python server.py --cpu
17
+ ```
18
+
19
+ and start training the LoRA from the training tab as usual.
20
+
21
+ #### 8-bit mode works with CPU offloading
22
+
23
+ ```
24
+ python server.py --load-in-8bit --gpu-memory 4000MiB
25
+ ```
26
+
27
+ #### `--pre_layer`, and not `--gpu-memory`, is the right way to do CPU offloading with 4-bit models
28
+
29
+ ```
30
+ python server.py --wbits 4 --groupsize 128 --pre_layer 20
31
+ ```
32
+
33
+ #### Models can be loaded in 32-bit, 16-bit, 8-bit, and 4-bit modes
34
+
35
+ ```
36
+ python server.py --cpu
37
+ python server.py
38
+ python server.py --load-in-8bit
39
+ python server.py --wbits 4
40
+ ```
41
+
42
+ #### The web UI works with any version of GPTQ-for-LLaMa
43
+
44
+ Including the up to date triton and cuda branches. But you have to delete the `repositories/GPTQ-for-LLaMa` folder and reinstall the new one every time:
45
+
46
+ ```
47
+ cd text-generation-webui/repositories
48
+ rm -r GPTQ-for-LLaMa
49
+ pip uninstall quant-cuda
50
+ git clone https://github.com/oobabooga/GPTQ-for-LLaMa -b cuda # or any other repository and branch
51
+ cd GPTQ-for-LLaMa
52
+ python setup_cuda.py install
53
+ ```
54
+
55
+ #### Instruction-following templates are represented as chat characters
56
+
57
+ https://github.com/oobabooga/text-generation-webui/tree/main/characters/instruction-following
58
+
59
+ #### The right way to run Alpaca, Open Assistant, Vicuna, etc is Instruct mode, not normal chat mode
60
+
61
+ Otherwise the prompt will not be formatted correctly.
62
+
63
+ 1. Start the web UI with
64
+
65
+ ```
66
+ python server.py --chat
67
+ ```
68
+
69
+ 2. Click on the "instruct" option under "Chat modes"
70
+
71
+ 3. Select the correct template in the hidden dropdown menu that will become visible.
72
+
73
+ #### Notebook mode is best mode
74
+
75
+ Ascended individuals have realized that notebook mode is the superset of chat mode and can do chats with ultimate flexibility, including group chats, editing replies, starting a new bot reply in a given way, and impersonating.
76
+
77
+ #### RWKV is an RNN
78
+
79
+ Most models are transformers, but not RWKV, which is an RNN. It's a great model.
80
+
81
+ #### `--gpu-memory` is not a hard limit on the GPU memory
82
+
83
+ It is simply a parameter that is passed to the `accelerate` library while loading the model. More memory will be allocated during generation. That's why this parameter has to be set to less than your total GPU memory.
84
+
85
+ #### Contrastive search is perhaps the best preset
86
+
87
+ But it uses a ton of VRAM.
88
+
89
+ #### You can check the sha256sum of downloaded models with the download script
90
+
91
+ ```
92
+ python download-model.py facebook/galactica-125m --check
93
+ ```
94
+
95
+ #### The download script continues interrupted downloads by default
96
+
97
+ It doesn't start over.
98
+
99
+ #### You can download models with multiple threads
100
+
101
+ ```
102
+ python download-model.py facebook/galactica-125m --threads 8
103
+ ```
104
+
105
+ #### LoRAs work in 4-bit mode
106
+
107
+ You need to follow [these instructions](GPTQ-models-(4-bit-mode).md#using-loras-with-gptq-for-llama) and then start the web UI with the `--monkey-patch` flag.
docs/System-requirements.md ADDED
@@ -0,0 +1,42 @@
 
 
1
+ These are the VRAM and RAM requirements (in MiB) to run some examples of models **in 16-bit (default) precision**:
2
+
3
+ | model | VRAM (GPU) | RAM |
4
+ |:-----------------------|-------------:|--------:|
5
+ | arxiv_ai_gpt2 | 1512.37 | 5824.2 |
6
+ | blenderbot-1B-distill | 2441.75 | 4425.91 |
7
+ | opt-1.3b | 2509.61 | 4427.79 |
8
+ | gpt-neo-1.3b | 2605.27 | 5851.58 |
9
+ | opt-2.7b | 5058.05 | 4863.95 |
10
+ | gpt4chan_model_float16 | 11653.7 | 4437.71 |
11
+ | gpt-j-6B | 11653.7 | 5633.79 |
12
+ | galactica-6.7b | 12697.9 | 4429.89 |
13
+ | opt-6.7b | 12700 | 4368.66 |
14
+ | bloomz-7b1-p3 | 13483.1 | 4470.34 |
15
+
16
+ #### GPU mode with 8-bit precision
17
+
18
+ Allows you to load models that would not normally fit into your GPU. Enabled by default for 13b and 20b models in this web UI.
19
+
20
+ | model | VRAM (GPU) | RAM |
21
+ |:---------------|-------------:|--------:|
22
+ | opt-13b | 12528.1 | 1152.39 |
23
+ | gpt-neox-20b | 20384 | 2291.7 |
24
+
25
+ #### CPU mode (32-bit precision)
26
+
27
+ A lot slower, but does not require a GPU.
28
+
29
+ On my i5-12400F, 6B models take around 10-20 seconds to respond in chat mode, and around 5 minutes to generate a 200-token completion.
30
+
31
+ | model | RAM |
32
+ |:-----------------------|---------:|
33
+ | arxiv_ai_gpt2 | 4430.82 |
34
+ | gpt-neo-1.3b | 6089.31 |
35
+ | opt-1.3b | 8411.12 |
36
+ | blenderbot-1B-distill | 8508.16 |
37
+ | opt-2.7b | 14969.3 |
38
+ | bloomz-7b1-p3 | 21371.2 |
39
+ | gpt-j-6B | 24200.3 |
40
+ | gpt4chan_model | 24246.3 |
41
+ | galactica-6.7b | 26561.4 |
42
+ | opt-6.7b | 29596.6 |
docs/Training-LoRAs.md ADDED
@@ -0,0 +1,174 @@
 
 
1
+ ## Training Your Own LoRAs
2
+
3
+ The WebUI seeks to make training your own LoRAs as easy as possible. It comes down to just a few simple steps:
4
+
5
+ ### **Step 1**: Make a plan.
6
+ - What base model do you want to use? The LoRA you make has to be matched up to a single architecture (eg LLaMA-13B) and cannot be transferred to others (eg LLaMA-7B, StableLM, etc. would all be different). Derivatives of the same model (eg Alpaca finetune of LLaMA-13B) might be transferrable, but even then it's best to train exactly on what you plan to use.
7
+ - What model format do you want? At time of writing, 8-bit models are most stable, and 4-bit are supported but experimental. In the near future it is likely that 4-bit will be the best option for most users.
8
+ - What are you training it on? Do you want it to learn real information, a simple format, ...?
9
+
10
+ ### **Step 2**: Gather a dataset.
11
+ - If you use a dataset similar to the [Alpaca](https://github.com/gururise/AlpacaDataCleaned/blob/main/alpaca_data_cleaned.json) format, that is natively supported by the `Formatted Dataset` input in the WebUI, with premade formatter options.
12
+ - If you use a dataset that isn't matched to Alpaca's format, but uses the same basic JSON structure, you can make your own format file by copying `training/formats/alpaca-format.json` to a new file and [editing its content](#format-files).
13
+ - If you can get the dataset into a simple text file, that works too! You can train using the `Raw text file` input option.
14
+ - This means you can for example just copy/paste a chatlog/documentation page/whatever you want, shove it in a plain text file, and train on it.
15
+ - If you use a structured dataset not in this format, you may have to find an external way to convert it - or open an issue to request native support.
16
+
17
+ ### **Step 3**: Do the training.
18
+ - **3.1**: Load the WebUI, and your model.
19
+ - Make sure you don't have any LoRAs already loaded (unless you want to train for multi-LoRA usage).
20
+ - **3.2**: Open the `Training` tab at the top, `Train LoRA` sub-tab.
21
+ - **3.3**: Fill in the name of the LoRA, select your dataset in the dataset options.
22
+ - **3.4**: Select other parameters to your preference. See [parameters below](#parameters).
23
+ - **3.5**: click `Start LoRA Training`, and wait.
24
+ - It can take a few hours for a large dataset, or just a few minutes if doing a small run.
25
+ - You may want to monitor your [loss value](#loss) while it goes.
26
+
27
+ ### **Step 4**: Evaluate your results.
28
+ - Load the LoRA under the Models Tab.
29
+ - You can go test-drive it on the `Text generation` tab, or you can use the `Perplexity evaluation` sub-tab of the `Training` tab.
30
+ - If you used the `Save every n steps` option, you can grab prior copies of the model from sub-folders within the LoRA model's folder and try them instead.
31
+
32
+ ### **Step 5**: Re-run if you're unhappy.
33
+ - Make sure to unload the LoRA before training it.
34
+ - You can simply resume a prior run - use `Copy parameters from` to select your LoRA, and edit parameters. Note that you cannot change the `Rank` of an already created LoRA.
35
+ - If you want to resume from a checkpoint saved along the way, simply copy the contents of the checkpoint folder into the LoRA's folder.
36
+ - (Note: `adapter_model.bin` is the important file that holds the actual LoRA content).
37
+ - This will start Learning Rate and Steps back to the start. If you want to resume as if you were midway through, you can adjust your Learning Rate to the last reported LR in logs and reduce your epochs.
38
+ - Or, you can start over entirely if you prefer.
39
+ - If your model is producing corrupted outputs, you probably need to start over and use a lower Learning Rate.
40
+ - If your model isn't learning detailed information but you want it to, you might need to just run more epochs, or you might need a higher Rank.
41
+ - If your model is enforcing a format you didn't want, you may need to tweak your dataset, or start over and not train as far.
42
+
43
+ ## Format Files
44
+
45
+ If using JSON formatted datasets, they are presumed to be in the following approximate format:
46
+
47
+ ```json
48
+ [
49
+ {
50
+ "somekey": "somevalue",
51
+ "key2": "value2"
52
+ },
53
+ {
54
+ // etc
55
+ }
56
+ ]
57
+ ```
58
+
59
+ Where the keys (eg `somekey`, `key2` above) are standardized, and relatively consistent across the dataset, and the values (eg `somevalue`, `value2`) contain the content actually intended to be trained.
60
+
61
+ For Alpaca, the keys are `instruction`, `input`, and `output`, wherein `input` is sometimes blank.
62
+
63
+ A simple format file for Alpaca to be used as a chat bot is:
64
+
65
+ ```json
66
+ {
67
+ "instruction,output": "User: %instruction%\nAssistant: %output%",
68
+ "instruction,input,output": "User: %instruction%: %input%\nAssistant: %output%"
69
+ }
70
+ ```
71
+
72
+ Note that the keys (eg `instruction,output`) are a comma-separated list of dataset keys, and the values are a simple string that uses those keys wrapped in `%` characters (eg `%instruction%`).
73
+
74
+ So for example if a dataset has `"instruction": "answer my question"`, then the format file's `User: %instruction%\n` will be automatically filled in as `User: answer my question\n`.
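+
+ A minimal Python sketch of that substitution (an illustration of the `%key%` convention only, not the webui's actual implementation):
+
+ ```python
+ import re
+
+ def apply_format(template: str, row: dict) -> str:
+     # Replace every %key% placeholder with the matching value from the dataset row
+     return re.sub(r"%(\w+)%", lambda m: row[m.group(1)], template)
+
+ row = {"instruction": "answer my question", "output": "Sure, here you go."}
+ print(apply_format("User: %instruction%\nAssistant: %output%", row))
+ ```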
75
+
76
+ If you have different sets of key inputs, you can make your own format file to match it. This format-file is designed to be as simple as possible to enable easy editing to match your needs.
77
+
78
+ ## Raw Text File Settings
79
+
80
+ When using raw text files as your dataset, the text is automatically split into chunks based on your `Cutoff Length`, and you get a few basic options to configure the chunks (see the sketch after this list):
81
+ - `Overlap Length` is how much to overlap chunks by. Overlapping chunks helps prevent the model from learning strange mid-sentence cuts, and instead learn continual sentences that flow from earlier text.
82
+ - `Prefer Newline Cut Length` sets a maximum distance in characters to shift the chunk cut towards newlines. Doing this helps prevent lines from starting or ending mid-sentence, preventing the model from learning to cut off sentences randomly.
83
+ - `Hard Cut String` sets a string that indicates there must be a hard cut without overlap. This defaults to `\n\n\n`, meaning 3 newlines. No trained chunk will ever contain this string. This allows you to insert unrelated sections of text in the same text file, but still ensure the model won't be taught to randomly change the subject.
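+
+ Here is a minimal sketch of this kind of chunking (an illustration only, not the webui's actual implementation; lengths are measured in characters here for simplicity, and the newline-preference logic is omitted):
+
+ ```python
+ def chunk_raw_text(text: str, cutoff_len: int = 256, overlap_len: int = 128, hard_cut: str = "\n\n\n"):
+     chunks = []
+     # Hard cut strings split the text into independent sections; no chunk ever spans a hard cut
+     for section in text.split(hard_cut):
+         step = max(cutoff_len - overlap_len, 1)
+         # Consecutive chunks overlap by `overlap_len` characters so the model sees continuous sentences
+         for start in range(0, max(len(section), 1), step):
+             chunk = section[start:start + cutoff_len]
+             if chunk.strip():
+                 chunks.append(chunk)
+     return chunks
+ ```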
84
+
85
+ ## Parameters
86
+
87
+ The basic purpose and function of each parameter is documented on-page in the WebUI, so read through them in the UI to understand your options.
88
+
89
+ That said, here's a guide to the most important parameter choices you should consider:
90
+
91
+ ### VRAM
92
+
93
+ - First, you must consider your VRAM availability.
94
+ - Generally, under default settings, VRAM usage for training is very close to VRAM usage when generating text (with 1000+ tokens of context); ie, if you can generate text, you can train LoRAs.
95
+ - Note: worse by default in the 4-bit monkeypatch currently. Reduce `Micro Batch Size` to `1` to restore this to expectations.
96
+ - If you have VRAM to spare, setting higher batch sizes will use more VRAM and get you better quality training in exchange.
97
+ - If you have large data, setting a higher cutoff length may be beneficial, but will cost significant VRAM. If you can spare some, set your batch size to `1` and see how high you can push your cutoff length.
98
+ - If you're low on VRAM, reducing batch size or cutoff length will of course improve that.
99
+ - Don't be afraid to just try it and see what happens. If it's too much, it will just error out, and you can lower settings and try again.
100
+
101
+ ### Rank
102
+
103
+ - Second, you want to consider the amount of learning you want.
104
+ - For example, you may wish to just learn a dialogue format (as in the case of Alpaca) in which case setting a low `Rank` value (32 or lower) works great.
105
+ - Or, you might be training on project documentation you want the bot to understand and be able to understand questions about, in which case the higher the rank, the better.
106
+ - Generally, higher Rank = more precise learning = more total content learned = more VRAM usage while training.
107
+
108
+ ### Learning Rate and Epochs
109
+
110
+ - Third, how carefully you want it to be learned.
111
+ - In other words, how okay or not you are with the model losing unrelated understandings.
112
+ - You can control this with 3 key settings: the Learning Rate, its scheduler, and your total epochs.
113
+ - The learning rate controls how much change is made to the model by each token it sees.
114
+ - It's in scientific notation normally, so for example `3e-4` means `3 * 10^-4` which is `0.0003`. The number after `e-` controls how many `0`s are in the number.
115
+ - Higher values let training run faster, but also are more likely to corrupt prior data in the model.
116
+ - You essentially have two variables to balance: the LR, and Epochs.
117
+ - If you make LR higher, you can set Epochs equally lower to match. High LR + low epochs = very fast, low quality training.
118
+ - If you make LR low, set epochs high. Low LR + high epochs = slow but high-quality training.
119
+ - The scheduler controls change-over-time as you train - it starts high, and then goes low. This helps balance getting data in, and having decent quality, at the same time.
120
+ - You can see graphs of the different scheduler options [in the HuggingFace docs here](https://moon-ci-docs.huggingface.co/docs/transformers/pr_1/en/main_classes/optimizer_schedules#transformers.SchedulerType)
121
+
122
+ ## Loss
123
+
124
+ When you're running training, the WebUI's console window will log reports that include, among other things, a numeric value named `Loss`. It will start as a high number, and gradually get lower and lower as it goes.
125
+
126
+ "Loss" in the world of AI training theoretically means "how close is the model to perfect", with `0` meaning "absolutely perfect". This is calculated by measuring the difference between the model outputting exactly the text you're training it to output, and what it actually outputs.
127
+
128
+ In practice, a good LLM should have a very complex variable range of ideas running in its artificial head, so a loss of `0` would indicate that the model has broken and forgotten how to think about anything other than what you trained it on.
129
+
130
+ So, in effect, Loss is a balancing game: you want to get it low enough that it understands your data, but high enough that it isn't forgetting everything else. Generally, if it goes below `1.0`, it's going to start forgetting its prior memories, and you should stop training. In some cases you may prefer to take it as low as `0.5` (if you want it to be very very predictable). Different goals have different needs, so don't be afraid to experiment and see what works best for you.
131
+
132
+ Note: if you see Loss start at or suddenly jump to exactly `0`, it is likely something has gone wrong in your training process (eg model corruption).
133
+
134
+ ## Note: 4-Bit Monkeypatch
135
+
136
+ The [4-bit LoRA monkeypatch](GPTQ-models-(4-bit-mode).md#using-loras-with-gptq-for-llama) works for training, but has side effects:
137
+ - VRAM usage is higher currently. You can reduce the `Micro Batch Size` to `1` to compensate.
138
+ - Models do funky things. LoRAs apply themselves, or refuse to apply, or spontaneously error out, or etc. It can be helpful to reload base model or restart the WebUI between training/usage to minimize chances of anything going haywire.
139
+ - Loading or working with multiple LoRAs at the same time doesn't currently work.
140
+ - Generally, recognize and treat the monkeypatch as the dirty temporary hack it is - it works, but isn't very stable. It will get better in time when everything is merged upstream for full official support.
141
+
142
+ ## Legacy notes
143
+
144
+ LoRA training was contributed by [mcmonkey4eva](https://github.com/mcmonkey4eva) in PR [#570](https://github.com/oobabooga/text-generation-webui/pull/570).
145
+
146
+ ### Using the original alpaca-lora code
147
+
148
+ Kept here for reference. The Training tab has much more features than this method.
149
+
150
+ ```
151
+ conda activate textgen
152
+ git clone https://github.com/tloen/alpaca-lora
153
+ ```
154
+
155
+ Edit those two lines in `alpaca-lora/finetune.py` to use your existing model folder instead of downloading everything from decapoda:
156
+
157
+ ```
158
+ model = LlamaForCausalLM.from_pretrained(
159
+ "models/llama-7b",
160
+ load_in_8bit=True,
161
+ device_map="auto",
162
+ )
163
+ tokenizer = LlamaTokenizer.from_pretrained(
164
+ "models/llama-7b", add_eos_token=True
165
+ )
166
+ ```
167
+
168
+ Run the script with:
169
+
170
+ ```
171
+ python finetune.py
172
+ ```
173
+
174
+ It just works. It runs at 22.32s/it, with 1170 iterations in total, so about 7 hours and a half for training a LoRA. RTX 3090, 18153MiB VRAM used, drawing maximum power (350W, room heater mode).
docs/WSL-installation-guide.md ADDED
@@ -0,0 +1,82 @@
 
 
1
+ Guide created by [@jfryton](https://github.com/jfryton). Thank you jfryton.
2
+
3
+ -----
4
+
5
+ Here's an easy-to-follow, step-by-step guide for installing Windows Subsystem for Linux (WSL) with Ubuntu on Windows 10/11:
6
+
7
+ ## Step 1: Enable WSL
8
+
9
+ 1. Press the Windows key + X and click on "Windows PowerShell (Admin)" or "Windows Terminal (Admin)" to open PowerShell or Terminal with administrator privileges.
10
+ 2. In the PowerShell window, type the following command and press Enter:
11
+
12
+ ```
13
+ wsl --install
14
+ ```
15
+
16
+ If this command doesn't work, you can enable WSL with the following command for Windows 10:
17
+
18
+ ```
19
+ wsl --set-default-version 1
20
+ ```
21
+
22
+ For Windows 11, you can use:
23
+
24
+ ```
25
+ wsl --set-default-version 2
26
+ ```
27
+
28
+ You may be prompted to restart your computer. If so, save your work and restart.
29
+
30
+ ## Step 2: Install Ubuntu
31
+
32
+ 1. Open the Microsoft Store.
33
+ 2. Search for "Ubuntu" in the search bar.
34
+ 3. Choose the desired Ubuntu version (e.g., Ubuntu 20.04 LTS) and click "Get" or "Install" to download and install the Ubuntu app.
35
+ 4. Once the installation is complete, click "Launch" or search for "Ubuntu" in the Start menu and open the app.
36
+
37
+ ## Step 3: Set up Ubuntu
38
+
39
+ 1. When you first launch the Ubuntu app, it will take a few minutes to set up. Be patient as it installs the necessary files and sets up your environment.
40
+ 2. Once the setup is complete, you will be prompted to create a new UNIX username and password. Choose a username and password, and make sure to remember them, as you will need them for future administrative tasks within the Ubuntu environment.
41
+
42
+ ## Step 4: Update and upgrade packages
43
+
44
+ 1. After setting up your username and password, it's a good idea to update and upgrade your Ubuntu system. Run the following commands in the Ubuntu terminal:
45
+
46
+ ```
47
+ sudo apt update
48
+ sudo apt upgrade
49
+ ```
50
+
51
+ 2. Enter your password when prompted. This will update the package list and upgrade any outdated packages.
52
+
53
+ Congratulations! You have now installed WSL with Ubuntu on your Windows 10/11 system. You can use the Ubuntu terminal for various tasks, like running Linux commands, installing packages, or managing files.
54
+
55
+ You can launch your WSL Ubuntu installation by selecting the Ubuntu app (like any other program installed on your computer) or typing 'ubuntu' into Powershell or Terminal.
56
+
57
+ ## Step 5: Proceed with Linux instructions
58
+
59
+ 1. You can now follow the Linux setup instructions. If you receive any error messages about a missing tool or package, just install them using apt:
60
+
61
+ ```
62
+ sudo apt install [missing package]
63
+ ```
64
+
65
+ You will probably need to install build-essential:
66
+
67
+ ```
68
+ sudo apt install build-essential
69
+ ```
70
+
71
+ If you face any issues or need to troubleshoot, you can always refer to the official Microsoft documentation for WSL: https://docs.microsoft.com/en-us/windows/wsl/
72
+
73
+ #### WSL2 performance using /mnt:
74
+ when you git clone a repository, put it inside WSL and not outside. To understand more, take a look at this [issue](https://github.com/microsoft/WSL/issues/4197#issuecomment-604592340)
75
+
76
+ ## Bonus: Port Forwarding
77
+
78
+ By default, you won't be able to access the webui from another device on your local network. You will need to set up the appropriate port forwarding with the following command (run PowerShell or Terminal with administrator privileges).
79
+
80
+ ```
81
+ netsh interface portproxy add v4tov4 listenaddress=0.0.0.0 listenport=7860 connectaddress=localhost connectport=7860
82
+ ```
docs/Windows-installation-guide.md ADDED
@@ -0,0 +1,9 @@
 
 
1
+ If you are having trouble following the installation instructions in the README, Reddit user [Technical_Leather949](https://www.reddit.com/user/Technical_Leather949/) has created a more detailed, step-by-step guide covering:
2
+
3
+ * Windows installation
4
+ * 8-bit mode on Windows
5
+ * LLaMA
6
+ * LLaMA 4-bit
7
+
8
+ The guide can be found here: https://www.reddit.com/r/LocalLLaMA/comments/11o6o3f/how_to_install_llama_8bit_and_4bit/
9
+
docs/llama.cpp.md ADDED
@@ -0,0 +1,45 @@
1
+ # llama.cpp
2
+
3
+ llama.cpp is the best backend in two important scenarios:
4
+
5
+ 1) You don't have a GPU.
6
+ 2) You want to run a model that doesn't fit into your GPU.
7
+
8
+ ## Setting up the models
9
+
10
+ #### Pre-converted
11
+
12
+ Download the GGUF or GGML model directly into your `text-generation-webui/models` folder. It will be a single file.
13
+
14
+ * For GGUF models, make sure its name contains `.gguf`.
15
+ * For GGML models, make sure its name contains `ggml` and ends in `.bin`.
16
+
17
+ `q4_K_M` quantization is recommended.
18
+
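+ As an illustration, a single pre-converted file can also be fetched with the included `download-model.py` script (the repository and file names below are examples only):
+ 
+ ```
+ python download-model.py TheBloke/Llama-2-7B-GGUF --specific-file llama-2-7b.Q4_K_M.gguf
+ ```
+ 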
19
+ #### Convert Llama yourself
20
+
21
+ Follow the instructions in the llama.cpp README to generate a ggml: https://github.com/ggerganov/llama.cpp#prepare-data--run
22
+
23
+ ## GPU acceleration
24
+
25
+ Enabled with the `--n-gpu-layers` parameter.
26
+
27
+ * If you have enough VRAM, use a high number like `--n-gpu-layers 1000` to offload all layers to the GPU.
28
+ * Otherwise, start with a low number like `--n-gpu-layers 10` and then gradually increase it until you run out of memory.
29
+
30
+ This feature works out of the box for NVIDIA GPUs on Linux (amd64) or Windows. For other GPUs, you need to uninstall `llama-cpp-python` with
31
+
32
+ ```
33
+ pip uninstall -y llama-cpp-python
34
+ ```
35
+
36
+ and then recompile it using the commands here: https://pypi.org/project/llama-cpp-python/
37
+
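+ For NVIDIA GPUs, for instance, the recompile typically looked like this at the time of writing (double-check against the link above, as the CMake flags may change):
+ 
+ ```
+ CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install llama-cpp-python --no-cache-dir
+ ```
+ 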
38
+ #### macOS
39
+
40
+ For macOS, these are the commands:
41
+
42
+ ```
43
+ pip uninstall -y llama-cpp-python
44
+ CMAKE_ARGS="-DLLAMA_METAL=on" FORCE_CMAKE=1 pip install llama-cpp-python --no-cache-dir
45
+ ```
download-model.py ADDED
@@ -0,0 +1,275 @@
1
+ '''
2
+ Downloads models from Hugging Face to models/username_modelname.
3
+
4
+ Example:
5
+ python download-model.py facebook/opt-1.3b
6
+
7
+ '''
8
+
9
+ import argparse
10
+ import base64
11
+ import datetime
12
+ import hashlib
13
+ import json
14
+ import os
15
+ import re
16
+ import sys
17
+ from pathlib import Path
18
+
19
+ import requests
20
+ import tqdm
21
+ from requests.adapters import HTTPAdapter
22
+ from tqdm.contrib.concurrent import thread_map
23
+
24
+
25
+ class ModelDownloader:
26
+ def __init__(self, max_retries=5):
27
+ self.session = requests.Session()
28
+ if max_retries:
29
+ self.session.mount('https://cdn-lfs.huggingface.co', HTTPAdapter(max_retries=max_retries))
30
+ self.session.mount('https://huggingface.co', HTTPAdapter(max_retries=max_retries))
31
+ if os.getenv('HF_USER') is not None and os.getenv('HF_PASS') is not None:
32
+ self.session.auth = (os.getenv('HF_USER'), os.getenv('HF_PASS'))
33
+ if os.getenv('HF_TOKEN') is not None:
34
+ self.session.headers = {'authorization': f'Bearer {os.getenv("HF_TOKEN")}'}
35
+
36
+ def sanitize_model_and_branch_names(self, model, branch):
37
+ if model[-1] == '/':
38
+ model = model[:-1]
39
+
40
+ if branch is None:
41
+ branch = "main"
42
+ else:
43
+ pattern = re.compile(r"^[a-zA-Z0-9._-]+$")
44
+ if not pattern.match(branch):
45
+ raise ValueError(
46
+ "Invalid branch name. Only alphanumeric characters, period, underscore and dash are allowed.")
47
+
48
+ return model, branch
49
+
50
+ def get_download_links_from_huggingface(self, model, branch, text_only=False, specific_file=None):
51
+ base = "https://huggingface.co"
52
+ page = f"/api/models/{model}/tree/{branch}"
53
+ cursor = b""
54
+
55
+ links = []
56
+ sha256 = []
57
+ classifications = []
58
+ has_pytorch = False
59
+ has_pt = False
60
+ has_gguf = False
61
+ has_ggml = False
62
+ has_safetensors = False
63
+ is_lora = False
64
+ while True:
65
+ url = f"{base}{page}" + (f"?cursor={cursor.decode()}" if cursor else "")
66
+ r = self.session.get(url, timeout=10)
67
+ r.raise_for_status()
68
+ content = r.content
69
+
70
+ dict = json.loads(content)
71
+ if len(dict) == 0:
72
+ break
73
+
74
+ for i in range(len(dict)):
75
+ fname = dict[i]['path']
76
+ if specific_file not in [None, ''] and fname != specific_file:
77
+ continue
78
+
79
+ if not is_lora and fname.endswith(('adapter_config.json', 'adapter_model.bin')):
80
+ is_lora = True
81
+
82
+ is_pytorch = re.match(r"(pytorch|adapter|gptq)_model.*\.bin", fname)
83
+ is_safetensors = re.match(r".*\.safetensors", fname)
84
+ is_pt = re.match(r".*\.pt", fname)
85
+ is_gguf = re.match(r'.*\.gguf', fname)
86
+ is_ggml = re.match(r".*ggml.*\.bin", fname)
87
+ is_tokenizer = re.match(r"(tokenizer|ice|spiece).*\.model", fname)
88
+ is_text = re.match(r".*\.(txt|json|py|md)", fname) or is_tokenizer
89
+ if any((is_pytorch, is_safetensors, is_pt, is_gguf, is_ggml, is_tokenizer, is_text)):
90
+ if 'lfs' in dict[i]:
91
+ sha256.append([fname, dict[i]['lfs']['oid']])
92
+
93
+ if is_text:
94
+ links.append(f"https://huggingface.co/{model}/resolve/{branch}/{fname}")
95
+ classifications.append('text')
96
+ continue
97
+
98
+ if not text_only:
99
+ links.append(f"https://huggingface.co/{model}/resolve/{branch}/{fname}")
100
+ if is_safetensors:
101
+ has_safetensors = True
102
+ classifications.append('safetensors')
103
+ elif is_pytorch:
104
+ has_pytorch = True
105
+ classifications.append('pytorch')
106
+ elif is_pt:
107
+ has_pt = True
108
+ classifications.append('pt')
109
+ elif is_gguf:
110
+ has_gguf = True
111
+ classifications.append('gguf')
112
+ elif is_ggml:
113
+ has_ggml = True
114
+ classifications.append('ggml')
115
+
116
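+ # Advance the paginated tree listing: Hugging Face expects a base64-encoded cursor (encoded twice here, with '=' escaped as %3D)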
+ cursor = base64.b64encode(f'{{"file_name":"{dict[-1]["path"]}"}}'.encode()) + b':50'
117
+ cursor = base64.b64encode(cursor)
118
+ cursor = cursor.replace(b'=', b'%3D')
119
+
120
+ # If both pytorch and safetensors are available, download safetensors only
121
+ if (has_pytorch or has_pt) and has_safetensors:
122
+ for i in range(len(classifications) - 1, -1, -1):
123
+ if classifications[i] in ['pytorch', 'pt']:
124
+ links.pop(i)
125
+
126
+ # If both GGML and GGUF are available, download GGUF only
127
+ if has_ggml and has_gguf:
128
+ for i in range(len(classifications) - 1, -1, -1):
129
+ if classifications[i] == 'ggml':
130
+ links.pop(i)
131
+
132
+ return links, sha256, is_lora, ((has_ggml or has_gguf) and specific_file is not None)
133
+
134
+ def get_output_folder(self, model, branch, is_lora, is_llamacpp=False, base_folder=None):
135
+ if base_folder is None:
136
+ base_folder = 'models' if not is_lora else 'loras'
137
+
138
+ # If the model is of type GGUF or GGML, save directly in the base_folder
139
+ if is_llamacpp:
140
+ return Path(base_folder)
141
+
142
+ output_folder = f"{'_'.join(model.split('/')[-2:])}"
143
+ if branch != 'main':
144
+ output_folder += f'_{branch}'
145
+
146
+ output_folder = Path(base_folder) / output_folder
147
+ return output_folder
148
+
149
+ def get_single_file(self, url, output_folder, start_from_scratch=False):
150
+ filename = Path(url.rsplit('/', 1)[1])
151
+ output_path = output_folder / filename
152
+ headers = {}
153
+ mode = 'wb'
154
+ if output_path.exists() and not start_from_scratch:
155
+
156
+ # Check if the file has already been downloaded completely
157
+ r = self.session.get(url, stream=True, timeout=10)
158
+ total_size = int(r.headers.get('content-length', 0))
159
+ if output_path.stat().st_size >= total_size:
160
+ return
161
+
162
+ # Otherwise, resume the download from where it left off
163
+ headers = {'Range': f'bytes={output_path.stat().st_size}-'}
164
+ mode = 'ab'
165
+
166
+ with self.session.get(url, stream=True, headers=headers, timeout=10) as r:
167
+ r.raise_for_status() # Do not continue the download if the request was unsuccessful
168
+ total_size = int(r.headers.get('content-length', 0))
169
+ block_size = 1024 * 1024 # 1MB
170
+ with open(output_path, mode) as f:
171
+ with tqdm.tqdm(total=total_size, unit='iB', unit_scale=True, bar_format='{l_bar}{bar}| {n_fmt:6}/{total_fmt:6} {rate_fmt:6}') as t:
172
+ count = 0
173
+ for data in r.iter_content(block_size):
174
+ t.update(len(data))
175
+ f.write(data)
176
+ if total_size != 0 and self.progress_bar is not None:
177
+ count += len(data)
178
+ self.progress_bar(float(count) / float(total_size), f"{filename}")
179
+
180
+ def start_download_threads(self, file_list, output_folder, start_from_scratch=False, threads=1):
181
+ thread_map(lambda url: self.get_single_file(url, output_folder, start_from_scratch=start_from_scratch), file_list, max_workers=threads, disable=True)
182
+
183
+ def download_model_files(self, model, branch, links, sha256, output_folder, progress_bar=None, start_from_scratch=False, threads=1, specific_file=None, is_llamacpp=False):
184
+ self.progress_bar = progress_bar
185
+
186
+ # Create the folder and write the metadata
187
+ output_folder.mkdir(parents=True, exist_ok=True)
188
+
189
+ if not is_llamacpp:
190
+ metadata = f'url: https://huggingface.co/{model}\n' \
191
+ f'branch: {branch}\n' \
192
+ f'download date: {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}\n'
193
+
194
+ sha256_str = '\n'.join([f' {item[1]} {item[0]}' for item in sha256])
195
+ if sha256_str:
196
+ metadata += f'sha256sum:\n{sha256_str}'
197
+
198
+ metadata += '\n'
199
+ (output_folder / 'huggingface-metadata.txt').write_text(metadata)
200
+
201
+ if specific_file:
202
+ print(f"Downloading {specific_file} to {output_folder}")
203
+ else:
204
+ print(f"Downloading the model to {output_folder}")
205
+
206
+ self.start_download_threads(links, output_folder, start_from_scratch=start_from_scratch, threads=threads)
207
+
208
+ def check_model_files(self, model, branch, links, sha256, output_folder):
209
+ # Validate the checksums
210
+ validated = True
211
+ for i in range(len(sha256)):
212
+ fpath = (output_folder / sha256[i][0])
213
+
214
+ if not fpath.exists():
215
+ print(f"The following file is missing: {fpath}")
216
+ validated = False
217
+ continue
218
+
219
+ with open(output_folder / sha256[i][0], "rb") as f:
220
+ bytes = f.read()
221
+ file_hash = hashlib.sha256(bytes).hexdigest()
222
+ if file_hash != sha256[i][1]:
223
+ print(f'Checksum failed: {sha256[i][0]} {sha256[i][1]}')
224
+ validated = False
225
+ else:
226
+ print(f'Checksum validated: {sha256[i][0]} {sha256[i][1]}')
227
+
228
+ if validated:
229
+ print('[+] Validated checksums of all model files!')
230
+ else:
231
+ print('[-] Invalid checksums. Rerun download-model.py with the --clean flag.')
232
+
233
+
234
+ if __name__ == '__main__':
235
+
236
+ parser = argparse.ArgumentParser()
237
+ parser.add_argument('MODEL', type=str, default=None, nargs='?')
238
+ parser.add_argument('--branch', type=str, default='main', help='Name of the Git branch to download from.')
239
+ parser.add_argument('--threads', type=int, default=1, help='Number of files to download simultaneously.')
240
+ parser.add_argument('--text-only', action='store_true', help='Only download text files (txt/json).')
241
+ parser.add_argument('--specific-file', type=str, default=None, help='Name of the specific file to download (if not provided, downloads all).')
242
+ parser.add_argument('--output', type=str, default=None, help='The folder where the model should be saved.')
243
+ parser.add_argument('--clean', action='store_true', help='Does not resume the previous download.')
244
+ parser.add_argument('--check', action='store_true', help='Validates the checksums of model files.')
245
+ parser.add_argument('--max-retries', type=int, default=5, help='Maximum number of retries if an error occurs during download.')
246
+ args = parser.parse_args()
247
+
248
+ branch = args.branch
249
+ model = args.MODEL
250
+ specific_file = args.specific_file
251
+
252
+ if model is None:
253
+ print("Error: Please specify the model you'd like to download (e.g. 'python download-model.py facebook/opt-1.3b').")
254
+ sys.exit()
255
+
256
+ downloader = ModelDownloader(max_retries=args.max_retries)
257
+ # Clean up the model/branch names
258
+ try:
259
+ model, branch = downloader.sanitize_model_and_branch_names(model, branch)
260
+ except ValueError as err_branch:
261
+ print(f"Error: {err_branch}")
262
+ sys.exit()
263
+
264
+ # Get the download links from Hugging Face
265
+ links, sha256, is_lora, is_llamacpp = downloader.get_download_links_from_huggingface(model, branch, text_only=args.text_only, specific_file=specific_file)
266
+
267
+ # Get the output folder
268
+ output_folder = downloader.get_output_folder(model, branch, is_lora, is_llamacpp=is_llamacpp, base_folder=args.output)
269
+
270
+ if args.check:
271
+ # Check previously downloaded files
272
+ downloader.check_model_files(model, branch, links, sha256, output_folder)
273
+ else:
274
+ # Download files
275
+ downloader.download_model_files(model, branch, links, sha256, output_folder, specific_file=specific_file, threads=args.threads, is_llamacpp=is_llamacpp)
extensions/api/blocking_api.py ADDED
@@ -0,0 +1,224 @@
1
+ import json
2
+ from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer
3
+ from threading import Thread
4
+
5
+ from extensions.api.util import build_parameters, try_start_cloudflared
6
+ from modules import shared
7
+ from modules.chat import generate_chat_reply
8
+ from modules.LoRA import add_lora_to_model
9
+ from modules.models import load_model, unload_model
10
+ from modules.models_settings import (
11
+ get_model_settings_from_yamls,
12
+ update_model_parameters
13
+ )
14
+ from modules.text_generation import (
15
+ encode,
16
+ generate_reply,
17
+ stop_everything_event
18
+ )
19
+ from modules.utils import get_available_models
20
+
21
+
22
+ def get_model_info():
23
+ return {
24
+ 'model_name': shared.model_name,
25
+ 'lora_names': shared.lora_names,
26
+ # dump
27
+ 'shared.settings': shared.settings,
28
+ 'shared.args': vars(shared.args),
29
+ }
30
+
31
+
32
+ class Handler(BaseHTTPRequestHandler):
33
+ def do_GET(self):
34
+ if self.path == '/api/v1/model':
35
+ self.send_response(200)
36
+ self.end_headers()
37
+ response = json.dumps({
38
+ 'result': shared.model_name
39
+ })
40
+
41
+ self.wfile.write(response.encode('utf-8'))
42
+ else:
43
+ self.send_error(404)
44
+
45
+ def do_POST(self):
46
+ content_length = int(self.headers['Content-Length'])
47
+ body = json.loads(self.rfile.read(content_length).decode('utf-8'))
48
+
49
+ if self.path == '/api/v1/generate':
50
+ self.send_response(200)
51
+ self.send_header('Content-Type', 'application/json')
52
+ self.end_headers()
53
+
54
+ prompt = body['prompt']
55
+ generate_params = build_parameters(body)
56
+ stopping_strings = generate_params.pop('stopping_strings')
57
+ generate_params['stream'] = False
58
+
59
+ generator = generate_reply(
60
+ prompt, generate_params, stopping_strings=stopping_strings, is_chat=False)
61
+
62
+ answer = ''
63
+ for a in generator:
64
+ answer = a
65
+
66
+ response = json.dumps({
67
+ 'results': [{
68
+ 'text': answer
69
+ }]
70
+ })
71
+
72
+ self.wfile.write(response.encode('utf-8'))
73
+
74
+ elif self.path == '/api/v1/chat':
75
+ self.send_response(200)
76
+ self.send_header('Content-Type', 'application/json')
77
+ self.end_headers()
78
+
79
+ user_input = body['user_input']
80
+ regenerate = body.get('regenerate', False)
81
+ _continue = body.get('_continue', False)
82
+
83
+ generate_params = build_parameters(body, chat=True)
84
+ generate_params['stream'] = False
85
+
86
+ generator = generate_chat_reply(
87
+ user_input, generate_params, regenerate=regenerate, _continue=_continue, loading_message=False)
88
+
89
+ answer = generate_params['history']
90
+ for a in generator:
91
+ answer = a
92
+
93
+ response = json.dumps({
94
+ 'results': [{
95
+ 'history': answer
96
+ }]
97
+ })
98
+
99
+ self.wfile.write(response.encode('utf-8'))
100
+
101
+ elif self.path == '/api/v1/stop-stream':
102
+ self.send_response(200)
103
+ self.send_header('Content-Type', 'application/json')
104
+ self.end_headers()
105
+
106
+ stop_everything_event()
107
+
108
+ response = json.dumps({
109
+ 'results': 'success'
110
+ })
111
+
112
+ self.wfile.write(response.encode('utf-8'))
113
+
114
+ elif self.path == '/api/v1/model':
115
+ self.send_response(200)
116
+ self.send_header('Content-Type', 'application/json')
117
+ self.end_headers()
118
+
119
+ # by default return the same as the GET interface
120
+ result = shared.model_name
121
+
122
+ # Actions: info, load, list, unload
123
+ action = body.get('action', '')
124
+
125
+ if action == 'load':
126
+ model_name = body['model_name']
127
+ args = body.get('args', {})
128
+ print('args', args)
129
+ for k in args:
130
+ setattr(shared.args, k, args[k])
131
+
132
+ shared.model_name = model_name
133
+ unload_model()
134
+
135
+ model_settings = get_model_settings_from_yamls(shared.model_name)
136
+ shared.settings.update(model_settings)
137
+ update_model_parameters(model_settings, initial=True)
138
+
139
+ if shared.settings['mode'] != 'instruct':
140
+ shared.settings['instruction_template'] = None
141
+
142
+ try:
143
+ shared.model, shared.tokenizer = load_model(shared.model_name)
144
+ if shared.args.lora:
145
+ add_lora_to_model(shared.args.lora) # list
146
+
147
+ except Exception as e:
148
+ response = json.dumps({'error': {'message': repr(e)}})
149
+
150
+ self.wfile.write(response.encode('utf-8'))
151
+ raise e
152
+
153
+ shared.args.model = shared.model_name
154
+
155
+ result = get_model_info()
156
+
157
+ elif action == 'unload':
158
+ unload_model()
159
+ shared.model_name = None
160
+ shared.args.model = None
161
+ result = get_model_info()
162
+
163
+ elif action == 'list':
164
+ result = get_available_models()
165
+
166
+ elif action == 'info':
167
+ result = get_model_info()
168
+
169
+ response = json.dumps({
170
+ 'result': result,
171
+ })
172
+
173
+ self.wfile.write(response.encode('utf-8'))
174
+
175
+ elif self.path == '/api/v1/token-count':
176
+ self.send_response(200)
177
+ self.send_header('Content-Type', 'application/json')
178
+ self.end_headers()
179
+
180
+ tokens = encode(body['prompt'])[0]
181
+ response = json.dumps({
182
+ 'results': [{
183
+ 'tokens': len(tokens)
184
+ }]
185
+ })
186
+
187
+ self.wfile.write(response.encode('utf-8'))
188
+ else:
189
+ self.send_error(404)
190
+
191
+ def do_OPTIONS(self):
192
+ self.send_response(200)
193
+ self.end_headers()
194
+
195
+ def end_headers(self):
196
+ self.send_header('Access-Control-Allow-Origin', '*')
197
+ self.send_header('Access-Control-Allow-Methods', '*')
198
+ self.send_header('Access-Control-Allow-Headers', '*')
199
+ self.send_header('Cache-Control', 'no-store, no-cache, must-revalidate')
200
+ super().end_headers()
201
+
202
+
203
+ def _run_server(port: int, share: bool = False, tunnel_id=str):
204
+ address = '0.0.0.0' if shared.args.listen else '127.0.0.1'
205
+
206
+ server = ThreadingHTTPServer((address, port), Handler)
207
+
208
+ def on_start(public_url: str):
209
+ print(f'Starting non-streaming server at public url {public_url}/api')
210
+
211
+ if share:
212
+ try:
213
+ try_start_cloudflared(port, tunnel_id, max_attempts=3, on_start=on_start)
214
+ except Exception:
215
+ pass
216
+ else:
217
+ print(
218
+ f'Starting API at http://{address}:{port}/api')
219
+
220
+ server.serve_forever()
221
+
222
+
223
+ def start_server(port: int, share: bool = False, tunnel_id=str):
224
+ Thread(target=_run_server, args=[port, share, tunnel_id], daemon=True).start()
extensions/api/requirements.txt ADDED
@@ -0,0 +1,2 @@
1
+ flask_cloudflared==0.0.14
2
+ websockets==11.0.2
extensions/api/script.py ADDED
@@ -0,0 +1,13 @@
1
+ import time
2
+
3
+ import extensions.api.blocking_api as blocking_api
4
+ import extensions.api.streaming_api as streaming_api
5
+ from modules import shared
6
+
7
+
8
+ def setup():
9
+ blocking_api.start_server(shared.args.api_blocking_port, share=shared.args.public_api, tunnel_id=shared.args.public_api_id)
10
+ if shared.args.public_api:
11
+ time.sleep(5)
12
+
13
+ streaming_api.start_server(shared.args.api_streaming_port, share=shared.args.public_api, tunnel_id=shared.args.public_api_id)
extensions/api/streaming_api.py ADDED
@@ -0,0 +1,124 @@
1
+ import asyncio
2
+ import json
3
+ from threading import Thread
4
+
5
+ from extensions.api.util import (
6
+ build_parameters,
7
+ try_start_cloudflared,
8
+ with_api_lock
9
+ )
10
+ from modules import shared
11
+ from modules.chat import generate_chat_reply
12
+ from modules.text_generation import generate_reply
13
+ from websockets.server import serve
14
+
15
+ PATH = '/api/v1/stream'
16
+
17
+
18
+ @with_api_lock
19
+ async def _handle_stream_message(websocket, message):
20
+ message = json.loads(message)
21
+
22
+ prompt = message['prompt']
23
+ generate_params = build_parameters(message)
24
+ stopping_strings = generate_params.pop('stopping_strings')
25
+ generate_params['stream'] = True
26
+
27
+ generator = generate_reply(
28
+ prompt, generate_params, stopping_strings=stopping_strings, is_chat=False)
29
+
30
+ # As we stream, only send the new bytes.
31
+ skip_index = 0
32
+ message_num = 0
33
+
34
+ for a in generator:
35
+ to_send = a[skip_index:]
36
+ if to_send is None or chr(0xfffd) in to_send: # partial unicode character, don't send it yet.
37
+ continue
38
+
39
+ await websocket.send(json.dumps({
40
+ 'event': 'text_stream',
41
+ 'message_num': message_num,
42
+ 'text': to_send
43
+ }))
44
+
45
+ await asyncio.sleep(0)
46
+ skip_index += len(to_send)
47
+ message_num += 1
48
+
49
+ await websocket.send(json.dumps({
50
+ 'event': 'stream_end',
51
+ 'message_num': message_num
52
+ }))
53
+
54
+
55
+ @with_api_lock
56
+ async def _handle_chat_stream_message(websocket, message):
57
+ body = json.loads(message)
58
+
59
+ user_input = body['user_input']
60
+ generate_params = build_parameters(body, chat=True)
61
+ generate_params['stream'] = True
62
+ regenerate = body.get('regenerate', False)
63
+ _continue = body.get('_continue', False)
64
+
65
+ generator = generate_chat_reply(
66
+ user_input, generate_params, regenerate=regenerate, _continue=_continue, loading_message=False)
67
+
68
+ message_num = 0
69
+ for a in generator:
70
+ await websocket.send(json.dumps({
71
+ 'event': 'text_stream',
72
+ 'message_num': message_num,
73
+ 'history': a
74
+ }))
75
+
76
+ await asyncio.sleep(0)
77
+ message_num += 1
78
+
79
+ await websocket.send(json.dumps({
80
+ 'event': 'stream_end',
81
+ 'message_num': message_num
82
+ }))
83
+
84
+
85
+ async def _handle_connection(websocket, path):
86
+
87
+ if path == '/api/v1/stream':
88
+ async for message in websocket:
89
+ await _handle_stream_message(websocket, message)
90
+
91
+ elif path == '/api/v1/chat-stream':
92
+ async for message in websocket:
93
+ await _handle_chat_stream_message(websocket, message)
94
+
95
+ else:
96
+ print(f'Streaming api: unknown path: {path}')
97
+ return
98
+
99
+
100
+ async def _run(host: str, port: int):
101
+ async with serve(_handle_connection, host, port, ping_interval=None):
102
+ await asyncio.Future() # run forever
103
+
104
+
105
+ def _run_server(port: int, share: bool = False, tunnel_id=str):
106
+ address = '0.0.0.0' if shared.args.listen else '127.0.0.1'
107
+
108
+ def on_start(public_url: str):
109
+ public_url = public_url.replace('https://', 'wss://')
110
+ print(f'Starting streaming server at public url {public_url}{PATH}')
111
+
112
+ if share:
113
+ try:
114
+ try_start_cloudflared(port, tunnel_id, max_attempts=3, on_start=on_start)
115
+ except Exception as e:
116
+ print(e)
117
+ else:
118
+ print(f'Starting streaming server at ws://{address}:{port}{PATH}')
119
+
120
+ asyncio.run(_run(host=address, port=port))
121
+
122
+
123
+ def start_server(port: int, share: bool = False, tunnel_id=str):
124
+ Thread(target=_run_server, args=[port, share, tunnel_id], daemon=True).start()
extensions/api/util.py ADDED
@@ -0,0 +1,148 @@
1
+ import asyncio
2
+ import functools
3
+ import threading
4
+ import time
5
+ import traceback
6
+ from threading import Thread
7
+ from typing import Callable, Optional
8
+
9
+ from modules import shared
10
+ from modules.chat import load_character_memoized
11
+ from modules.presets import load_preset_memoized
12
+
13
+ # We use a thread local to store the asyncio lock, so that each thread
14
+ # has its own lock. This isn't strictly necessary, but it would allow us
15
+ # to support multiple worker threads in the future, handling multiple
16
+ # requests in parallel.
17
+ api_tls = threading.local()
18
+
19
+
20
+ def build_parameters(body, chat=False):
21
+
22
+ generate_params = {
23
+ 'max_new_tokens': int(body.get('max_new_tokens', body.get('max_length', 200))),
24
+ 'auto_max_new_tokens': bool(body.get('auto_max_new_tokens', False)),
25
+ 'max_tokens_second': int(body.get('max_tokens_second', 0)),
26
+ 'do_sample': bool(body.get('do_sample', True)),
27
+ 'temperature': float(body.get('temperature', 0.5)),
28
+ 'top_p': float(body.get('top_p', 1)),
29
+ 'typical_p': float(body.get('typical_p', body.get('typical', 1))),
30
+ 'epsilon_cutoff': float(body.get('epsilon_cutoff', 0)),
31
+ 'eta_cutoff': float(body.get('eta_cutoff', 0)),
32
+ 'tfs': float(body.get('tfs', 1)),
33
+ 'top_a': float(body.get('top_a', 0)),
34
+ 'repetition_penalty': float(body.get('repetition_penalty', body.get('rep_pen', 1.1))),
35
+ 'repetition_penalty_range': int(body.get('repetition_penalty_range', 0)),
36
+ 'encoder_repetition_penalty': float(body.get('encoder_repetition_penalty', 1.0)),
37
+ 'top_k': int(body.get('top_k', 0)),
38
+ 'min_length': int(body.get('min_length', 0)),
39
+ 'no_repeat_ngram_size': int(body.get('no_repeat_ngram_size', 0)),
40
+ 'num_beams': int(body.get('num_beams', 1)),
41
+ 'penalty_alpha': float(body.get('penalty_alpha', 0)),
42
+ 'length_penalty': float(body.get('length_penalty', 1)),
43
+ 'early_stopping': bool(body.get('early_stopping', False)),
44
+ 'mirostat_mode': int(body.get('mirostat_mode', 0)),
45
+ 'mirostat_tau': float(body.get('mirostat_tau', 5)),
46
+ 'mirostat_eta': float(body.get('mirostat_eta', 0.1)),
47
+ 'guidance_scale': float(body.get('guidance_scale', 1)),
48
+ 'negative_prompt': str(body.get('negative_prompt', '')),
49
+ 'seed': int(body.get('seed', -1)),
50
+ 'add_bos_token': bool(body.get('add_bos_token', True)),
51
+ 'truncation_length': int(body.get('truncation_length', body.get('max_context_length', 2048))),
52
+ 'ban_eos_token': bool(body.get('ban_eos_token', False)),
53
+ 'skip_special_tokens': bool(body.get('skip_special_tokens', True)),
54
+ 'custom_stopping_strings': '', # leave this blank
55
+ 'stopping_strings': body.get('stopping_strings', []),
56
+ }
57
+
58
+ preset_name = body.get('preset', 'None')
59
+ if preset_name not in ['None', None, '']:
60
+ preset = load_preset_memoized(preset_name)
61
+ generate_params.update(preset)
62
+
63
+ if chat:
64
+ character = body.get('character')
65
+ instruction_template = body.get('instruction_template', shared.settings['instruction_template'])
66
+ if str(instruction_template) == "None":
67
+ instruction_template = "Vicuna-v1.1"
68
+
69
+ name1, name2, _, greeting, context, _ = load_character_memoized(character, str(body.get('your_name', shared.settings['name1'])), shared.settings['name2'], instruct=False)
70
+ name1_instruct, name2_instruct, _, _, context_instruct, turn_template = load_character_memoized(instruction_template, '', '', instruct=True)
71
+ generate_params.update({
72
+ 'mode': str(body.get('mode', 'chat')),
73
+ 'name1': str(body.get('name1', name1)),
74
+ 'name2': str(body.get('name2', name2)),
75
+ 'context': str(body.get('context', context)),
76
+ 'greeting': str(body.get('greeting', greeting)),
77
+ 'name1_instruct': str(body.get('name1_instruct', name1_instruct)),
78
+ 'name2_instruct': str(body.get('name2_instruct', name2_instruct)),
79
+ 'context_instruct': str(body.get('context_instruct', context_instruct)),
80
+ 'turn_template': str(body.get('turn_template', turn_template)),
81
+ 'chat-instruct_command': str(body.get('chat_instruct_command', body.get('chat-instruct_command', shared.settings['chat-instruct_command']))),
82
+ 'history': body.get('history', {'internal': [], 'visible': []})
83
+ })
84
+
85
+ return generate_params
86
+
87
+
88
+ def try_start_cloudflared(port: int, tunnel_id: str, max_attempts: int = 3, on_start: Optional[Callable[[str], None]] = None):
89
+ Thread(target=_start_cloudflared, args=[
90
+ port, tunnel_id, max_attempts, on_start], daemon=True).start()
91
+
92
+
93
+ def _start_cloudflared(port: int, tunnel_id: str, max_attempts: int = 3, on_start: Optional[Callable[[str], None]] = None):
94
+ try:
95
+ from flask_cloudflared import _run_cloudflared
96
+ except ImportError:
97
+ print('You should install flask_cloudflared manually')
98
+ raise Exception(
99
+ 'flask_cloudflared not installed. Make sure you installed the requirements.txt for this extension.')
100
+
101
+ for _ in range(max_attempts):
102
+ try:
103
+ if tunnel_id is not None:
104
+ public_url = _run_cloudflared(port, port + 1, tunnel_id=tunnel_id)
105
+ else:
106
+ public_url = _run_cloudflared(port, port + 1)
107
+
108
+ if on_start:
109
+ on_start(public_url)
110
+
111
+ return
112
+ except Exception:
113
+ traceback.print_exc()
114
+ time.sleep(3)
115
+
116
+ raise Exception('Could not start cloudflared.')
117
+
118
+
119
+ def _get_api_lock(tls) -> asyncio.Lock:
120
+ """
121
+ The streaming and blocking API implementations each run on their own
122
+ thread, and multiplex requests using asyncio. If multiple outstanding
123
+ requests are received at once, we will try to acquire the shared lock
124
+ shared.generation_lock multiple times in succession in the same thread,
125
+ which will cause a deadlock.
126
+
127
+ To avoid this, we use this wrapper function to block on an asyncio
128
+ lock, and then try and grab the shared lock only while holding
129
+ the asyncio lock.
130
+ """
131
+ if not hasattr(tls, "asyncio_lock"):
132
+ tls.asyncio_lock = asyncio.Lock()
133
+
134
+ return tls.asyncio_lock
135
+
136
+
137
+ def with_api_lock(func):
138
+ """
139
+ This decorator should be added to all streaming API methods which
140
+ require access to the shared.generation_lock. It ensures that the
141
+ tls.asyncio_lock is acquired before the method is called, and
142
+ released afterwards.
143
+ """
144
+ @functools.wraps(func)
145
+ async def api_wrapper(*args, **kwargs):
146
+ async with _get_api_lock(api_tls):
147
+ return await func(*args, **kwargs)
148
+ return api_wrapper
extensions/character_bias/script.py ADDED
@@ -0,0 +1,83 @@
1
+ import os
2
+
3
+ import gradio as gr
4
+
5
+ # get the current directory of the script
6
+ current_dir = os.path.dirname(os.path.abspath(__file__))
7
+
8
+ # check if the bias_options.txt file exists, if not, create it
9
+ bias_file = os.path.join(current_dir, "bias_options.txt")
10
+ if not os.path.isfile(bias_file):
11
+ with open(bias_file, "w") as f:
12
+ f.write("*I am so happy*\n*I am so sad*\n*I am so excited*\n*I am so bored*\n*I am so angry*")
13
+
14
+ # read bias options from the text file
15
+ with open(bias_file, "r") as f:
16
+ bias_options = [line.strip() for line in f.readlines()]
17
+
18
+ params = {
19
+ "activate": True,
20
+ "bias string": " *I am so happy*",
21
+ "use custom string": False,
22
+ }
23
+
24
+
25
+ def input_modifier(string):
26
+ """
27
+ This function is applied to your text inputs before
28
+ they are fed into the model.
29
+ """
30
+ return string
31
+
32
+
33
+ def output_modifier(string):
34
+ """
35
+ This function is applied to the model outputs.
36
+ """
37
+ return string
38
+
39
+
40
+ def bot_prefix_modifier(string):
41
+ """
42
+ This function is only applied in chat mode. It modifies
43
+ the prefix text for the Bot and can be used to bias its
44
+ behavior.
45
+ """
46
+ if params['activate']:
47
+ if params['use custom string']:
48
+ return f'{string} {params["custom string"].strip()} '
49
+ else:
50
+ return f'{string} {params["bias string"].strip()} '
51
+ else:
52
+ return string
53
+
54
+
55
+ def ui():
56
+ # Gradio elements
57
+ activate = gr.Checkbox(value=params['activate'], label='Activate character bias')
58
+ dropdown_string = gr.Dropdown(choices=bias_options, value=params["bias string"], label='Character bias', info='To edit the options in this dropdown, edit the "bias_options.txt" file')
59
+ use_custom_string = gr.Checkbox(value=False, label='Use custom bias textbox instead of dropdown')
60
+ custom_string = gr.Textbox(value="", placeholder="Enter custom bias string", label="Custom Character Bias", info='To use this textbox, activate the checkbox above')
61
+
62
+ # Event functions to update the parameters in the backend
63
+ def update_bias_string(x):
64
+ if x:
65
+ params.update({"bias string": x})
66
+ else:
67
+ params.update({"bias string": dropdown_string.get()})
68
+ return x
69
+
70
+ def update_custom_string(x):
71
+ params.update({"custom string": x})
72
+
73
+ dropdown_string.change(update_bias_string, dropdown_string, None)
74
+ custom_string.change(update_custom_string, custom_string, None)
75
+ activate.change(lambda x: params.update({"activate": x}), activate, None)
76
+ use_custom_string.change(lambda x: params.update({"use custom string": x}), use_custom_string, None)
77
+
78
+ # Group elements together depending on the selected option
79
+ def bias_string_group():
80
+ if use_custom_string.value:
81
+ return gr.Group([use_custom_string, custom_string])
82
+ else:
83
+ return dropdown_string
extensions/elevenlabs_tts/outputs/outputs-will-be-saved-here.txt ADDED
File without changes