Spaces:
Upload 5 files
- LICENSE.txt +126 -0
- app.py +280 -0
- model.py +70 -0
- requirements.txt +8 -0
- style.css +16 -0
LICENSE.txt
ADDED
@@ -0,0 +1,126 @@
LLAMA 2 COMMUNITY LICENSE AGREEMENT
Llama 2 Version Release Date: July 18, 2023

"Agreement" means the terms and conditions for use, reproduction, distribution and modification of the Llama Materials set forth herein.

"Documentation" means the specifications, manuals and documentation accompanying Llama 2 distributed by Meta at ai.meta.com/resources/models-and-libraries/llama-downloads/.

"Licensee" or "you" means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entity's behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf.

"Llama 2" means the foundational large language models and software and algorithms, including machine-learning model code, trained model weights, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing distributed by Meta at ai.meta.com/resources/models-and-libraries/llama-downloads/.

"Llama Materials" means, collectively, Meta's proprietary Llama 2 and Documentation (and any portion thereof) made available under this Agreement.

"Meta" or "we" means Meta Platforms Ireland Limited (if you are located in or, if you are an entity, your principal place of business is in the EEA or Switzerland) and Meta Platforms, Inc. (if you are located outside of the EEA or Switzerland).

By clicking "I Accept" below or by using or distributing any portion or element of the Llama Materials, you agree to be bound by this Agreement.

1. License Rights and Redistribution.

a. Grant of Rights. You are granted a non-exclusive, worldwide, non-transferable and royalty-free limited license under Meta's intellectual property or other rights owned by Meta embodied in the Llama Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the Llama Materials.

b. Redistribution and Use.

i. If you distribute or make the Llama Materials, or any derivative works thereof, available to a third party, you shall provide a copy of this Agreement to such third party.

ii. If you receive Llama Materials, or any derivative works thereof, from a Licensee as part of an integrated end user product, then Section 2 of this Agreement will not apply to you.

iii. You must retain in all copies of the Llama Materials that you distribute the following attribution notice within a "Notice" text file distributed as a part of such copies: "Llama 2 is licensed under the LLAMA 2 Community License, Copyright (c) Meta Platforms, Inc. All Rights Reserved."

iv. Your use of the Llama Materials must comply with applicable laws and regulations (including trade compliance laws and regulations) and adhere to the Acceptable Use Policy for the Llama Materials (available at https://ai.meta.com/llama/use-policy), which is hereby incorporated by reference into this Agreement.

v. You will not use the Llama Materials or any output or results of the Llama Materials to improve any other large language model (excluding Llama 2 or derivative works thereof).

2. Additional Commercial Terms. If, on the Llama 2 version release date, the monthly active users of the products or services made available by or for Licensee, or Licensee's affiliates, is greater than 700 million monthly active users in the preceding calendar month, you must request a license from Meta, which Meta may grant to you in its sole discretion, and you are not authorized to exercise any of the rights under this Agreement unless or until Meta otherwise expressly grants you such rights.

3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS.

4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING.

5. Intellectual Property.

a. No trademark licenses are granted under this Agreement, and in connection with the Llama Materials, neither Meta nor Licensee may use any name or mark owned by or associated with the other or any of its affiliates, except as required for reasonable and customary use in describing and redistributing the Llama Materials.

b. Subject to Meta's ownership of Llama Materials and derivatives made by or for Meta, with respect to any derivative works and modifications of the Llama Materials that are made by you, as between you and Meta, you are and will be the owner of such derivative works and modifications.

c. If you institute litigation or other proceedings against Meta or any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Llama Materials or Llama 2 outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Meta from and against any claim by any third party arising out of or related to your use or distribution of the Llama Materials.

6. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access to the Llama Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Meta may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of the Llama Materials. Sections 3, 4 and 7 shall survive the termination of this Agreement.

7. Governing Law and Jurisdiction. This Agreement will be governed and construed under the laws of the State of California without regard to choice of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement. The courts of California shall have exclusive jurisdiction of any dispute arising out of this Agreement.
app.py
ADDED
@@ -0,0 +1,280 @@
from typing import Iterator

import gradio as gr
import torch

from model import get_input_token_length, run

DEFAULT_SYSTEM_PROMPT = """\
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.

If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.\
"""
MAX_MAX_NEW_TOKENS = 2048
DEFAULT_MAX_NEW_TOKENS = 1024
MAX_INPUT_TOKEN_LENGTH = 4000

DESCRIPTION = """
# Llama-2 7B Chat

This Space demonstrates model [Llama-2-7b-chat](https://huggingface.co/meta-llama/Llama-2-7b-chat) by Meta, a Llama 2 model with 7B parameters fine-tuned for chat instructions. Feel free to play with it, or duplicate to run generations without a queue! If you want to run your own service, you can also [deploy the model on Inference Endpoints](https://huggingface.co/inference-endpoints).

🔎 For more details about the Llama 2 family of models and how to use them with `transformers`, take a look [at our blog post](https://huggingface.co/blog/llama2).

🔨 Looking for an even more powerful model? Check out the [13B version](https://huggingface.co/spaces/huggingface-projects/llama-2-13b-chat) or the large [70B model demo](https://huggingface.co/spaces/ysharma/Explore_llamav2_with_TGI).
"""

LICENSE = """
<p/>

---
As a derivative work of [Llama-2-7b-chat](https://huggingface.co/meta-llama/Llama-2-7b-chat) by Meta,
this demo is governed by the original [license](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat/blob/main/LICENSE.txt) and [acceptable use policy](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat/blob/main/USE_POLICY.md).
"""

if not torch.cuda.is_available():
    DESCRIPTION += '\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>'


def clear_and_save_textbox(message: str) -> tuple[str, str]:
    return '', message


def display_input(message: str,
                  history: list[tuple[str, str]]) -> list[tuple[str, str]]:
    history.append((message, ''))
    return history


def delete_prev_fn(
        history: list[tuple[str, str]]) -> tuple[list[tuple[str, str]], str]:
    try:
        message, _ = history.pop()
    except IndexError:
        message = ''
    return history, message or ''


def generate(
    message: str,
    history_with_input: list[tuple[str, str]],
    system_prompt: str,
    max_new_tokens: int,
    temperature: float,
    top_p: float,
    top_k: int,
) -> Iterator[list[tuple[str, str]]]:
    if max_new_tokens > MAX_MAX_NEW_TOKENS:
        raise ValueError

    history = history_with_input[:-1]
    generator = run(message, history, system_prompt, max_new_tokens, temperature, top_p, top_k)
    try:
        first_response = next(generator)
        yield history + [(message, first_response)]
    except StopIteration:
        yield history + [(message, '')]
    for response in generator:
        yield history + [(message, response)]


def process_example(message: str) -> tuple[str, list[tuple[str, str]]]:
    generator = generate(message, [], DEFAULT_SYSTEM_PROMPT, 1024, 1, 0.95, 50)
    for x in generator:
        pass
    return '', x


def check_input_token_length(message: str, chat_history: list[tuple[str, str]], system_prompt: str) -> None:
    input_token_length = get_input_token_length(message, chat_history, system_prompt)
    if input_token_length > MAX_INPUT_TOKEN_LENGTH:
        raise gr.Error(f'The accumulated input is too long ({input_token_length} > {MAX_INPUT_TOKEN_LENGTH}). Clear your chat history and try again.')


with gr.Blocks(css='style.css') as demo:
    gr.Markdown(DESCRIPTION)
    gr.DuplicateButton(value='Duplicate Space for private use',
                       elem_id='duplicate-button')

    with gr.Group():
        chatbot = gr.Chatbot(label='Chatbot')
        with gr.Row():
            textbox = gr.Textbox(
                container=False,
                show_label=False,
                placeholder='Type a message...',
                scale=10,
            )
            submit_button = gr.Button('Submit',
                                      variant='primary',
                                      scale=1,
                                      min_width=0)
    with gr.Row():
        retry_button = gr.Button('🔄 Retry', variant='secondary')
        undo_button = gr.Button('↩️ Undo', variant='secondary')
        clear_button = gr.Button('🗑️ Clear', variant='secondary')

    saved_input = gr.State()

    with gr.Accordion(label='Advanced options', open=False):
        system_prompt = gr.Textbox(label='System prompt',
                                   value=DEFAULT_SYSTEM_PROMPT,
                                   lines=6)
        max_new_tokens = gr.Slider(
            label='Max new tokens',
            minimum=1,
            maximum=MAX_MAX_NEW_TOKENS,
            step=1,
            value=DEFAULT_MAX_NEW_TOKENS,
        )
        temperature = gr.Slider(
            label='Temperature',
            minimum=0.1,
            maximum=4.0,
            step=0.1,
            value=1.0,
        )
        top_p = gr.Slider(
            label='Top-p (nucleus sampling)',
            minimum=0.05,
            maximum=1.0,
            step=0.05,
            value=0.95,
        )
        top_k = gr.Slider(
            label='Top-k',
            minimum=1,
            maximum=1000,
            step=1,
            value=50,
        )

    gr.Examples(
        examples=[
            'Hello there! How are you doing?',
            'Can you explain briefly to me what is the Python programming language?',
            'Explain the plot of Cinderella in a sentence.',
            'How many hours does it take a man to eat a Helicopter?',
            "Write a 100-word article on 'Benefits of Open-Source in AI research'",
        ],
        inputs=textbox,
        outputs=[textbox, chatbot],
        fn=process_example,
        cache_examples=True,
    )

    gr.Markdown(LICENSE)

    textbox.submit(
        fn=clear_and_save_textbox,
        inputs=textbox,
        outputs=[textbox, saved_input],
        api_name=False,
        queue=False,
    ).then(
        fn=display_input,
        inputs=[saved_input, chatbot],
        outputs=chatbot,
        api_name=False,
        queue=False,
    ).then(
        fn=check_input_token_length,
        inputs=[saved_input, chatbot, system_prompt],
        api_name=False,
        queue=False,
    ).success(
        fn=generate,
        inputs=[
            saved_input,
            chatbot,
            system_prompt,
            max_new_tokens,
            temperature,
            top_p,
            top_k,
        ],
        outputs=chatbot,
        api_name=False,
    )

    button_event_preprocess = submit_button.click(
        fn=clear_and_save_textbox,
        inputs=textbox,
        outputs=[textbox, saved_input],
        api_name=False,
        queue=False,
    ).then(
        fn=display_input,
        inputs=[saved_input, chatbot],
        outputs=chatbot,
        api_name=False,
        queue=False,
    ).then(
        fn=check_input_token_length,
        inputs=[saved_input, chatbot, system_prompt],
        api_name=False,
        queue=False,
    ).success(
        fn=generate,
        inputs=[
            saved_input,
            chatbot,
            system_prompt,
            max_new_tokens,
            temperature,
            top_p,
            top_k,
        ],
        outputs=chatbot,
        api_name=False,
    )

    retry_button.click(
        fn=delete_prev_fn,
        inputs=chatbot,
        outputs=[chatbot, saved_input],
        api_name=False,
        queue=False,
    ).then(
        fn=display_input,
        inputs=[saved_input, chatbot],
        outputs=chatbot,
        api_name=False,
        queue=False,
    ).then(
        fn=generate,
        inputs=[
            saved_input,
            chatbot,
            system_prompt,
            max_new_tokens,
            temperature,
            top_p,
            top_k,
        ],
        outputs=chatbot,
        api_name=False,
    )

    undo_button.click(
        fn=delete_prev_fn,
        inputs=chatbot,
        outputs=[chatbot, saved_input],
        api_name=False,
        queue=False,
    ).then(
        fn=lambda x: x,
        inputs=[saved_input],
        outputs=textbox,
        api_name=False,
        queue=False,
    )

    clear_button.click(
        fn=lambda: ([], ''),
        outputs=[chatbot, saved_input],
        queue=False,
        api_name=False,
    )

demo.queue(max_size=20).launch()
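A note on the streaming contract above: `generate` yields the full chat history on every step, with the latest assistant message holding the text generated so far, so the `Chatbot` component is simply re-rendered on each yield. The sketch below is illustrative only (it is not part of the Space; it assumes app.py's module-level names are in scope and that the model in model.py loaded on a GPU) and drains the generator the same way `process_example` does.

# Illustrative only: consume the streaming generator outside the UI.
history: list[tuple[str, str]] = []
for updated_history in generate('Hello there! How are you doing?', history,
                                DEFAULT_SYSTEM_PROMPT, DEFAULT_MAX_NEW_TOKENS,
                                1.0, 0.95, 50):
    history = updated_history  # the final yield holds the complete response
print(history[-1][1])  # the assistant's full reply
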
model.py
ADDED
@@ -0,0 +1,70 @@
from threading import Thread
from typing import Iterator

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

model_id = 'meta-llama/Llama-2-7b-chat-hf'

if torch.cuda.is_available():
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        torch_dtype=torch.float16,
        device_map='auto'
    )
else:
    model = None
tokenizer = AutoTokenizer.from_pretrained(model_id)


def get_prompt(message: str, chat_history: list[tuple[str, str]],
               system_prompt: str) -> str:
    texts = [f'<s>[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n']
    # The first user input is _not_ stripped
    do_strip = False
    for user_input, response in chat_history:
        user_input = user_input.strip() if do_strip else user_input
        do_strip = True
        texts.append(f'{user_input} [/INST] {response.strip()} </s><s>[INST] ')
    message = message.strip() if do_strip else message
    texts.append(f'{message} [/INST]')
    return ''.join(texts)


def get_input_token_length(message: str, chat_history: list[tuple[str, str]], system_prompt: str) -> int:
    prompt = get_prompt(message, chat_history, system_prompt)
    input_ids = tokenizer([prompt], return_tensors='np', add_special_tokens=False)['input_ids']
    return input_ids.shape[-1]


def run(message: str,
        chat_history: list[tuple[str, str]],
        system_prompt: str,
        max_new_tokens: int = 1024,
        temperature: float = 0.8,
        top_p: float = 0.95,
        top_k: int = 50) -> Iterator[str]:
    prompt = get_prompt(message, chat_history, system_prompt)
    inputs = tokenizer([prompt], return_tensors='pt', add_special_tokens=False).to('cuda')

    streamer = TextIteratorStreamer(tokenizer,
                                    timeout=10.,
                                    skip_prompt=True,
                                    skip_special_tokens=True)
    generate_kwargs = dict(
        inputs,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        top_p=top_p,
        top_k=top_k,
        temperature=temperature,
        num_beams=1,
    )
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()

    outputs = []
    for text in streamer:
        outputs.append(text)
        yield ''.join(outputs)
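For reference, `get_prompt` above assembles the Llama 2 chat template, with `<<SYS>>` markers around the system prompt and `[INST]`/`[/INST]` markers around each user turn. A worked example with one prior turn, using purely illustrative values, is sketched below.

# Illustrative only: the prompt string produced for one prior exchange.
prompt = get_prompt('How far away is the Moon?',
                    chat_history=[('Hi!', 'Hello! How can I help?')],
                    system_prompt='Be concise.')
# prompt == ('<s>[INST] <<SYS>>\nBe concise.\n<</SYS>>\n\n'
#            'Hi! [/INST] Hello! How can I help? </s><s>[INST] How far away is the Moon? [/INST]')
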
requirements.txt
ADDED
@@ -0,0 +1,8 @@
accelerate==0.21.0
bitsandbytes==0.40.2
gradio==3.37.0
protobuf==3.20.3
scipy==1.11.1
sentencepiece==0.1.99
torch==2.0.1
transformers==4.31.0
style.css
ADDED
@@ -0,0 +1,16 @@
h1 {
  text-align: center;
}

#duplicate-button {
  margin: auto;
  color: white;
  background: #1565c0;
  border-radius: 100vh;
}

#component-0 {
  max-width: 900px;
  margin: auto;
  padding-top: 1.5rem;
}