ubermenchh committed
Commit 742db12
1 Parent(s): 9cce8d2

Create app.py

Files changed (1)
app.py +245 -0
app.py ADDED
@@ -0,0 +1,245 @@
+import os
+from typing import Iterator
+
+from text_generation import Client
+import gradio as gr
+
+model_id = 'meta-llama/Llama-2-7b-chat-hf'
+
+API_URL = "https://api-inference.huggingface.co/models/" + model_id
+# Read token for the HF Inference API; expected in the HF_READ_TOKEN env var.
+HF_TOKEN = os.environ.get('HF_READ_TOKEN', None)
+
+client = Client(
+    API_URL,
+    headers={'Authorization': f"Bearer {HF_TOKEN}"}
+)
+EOS_STRING = "</s>"
+EOT_STRING = "<EOT>"
+
+def get_prompt(message, chat_history, system_prompt):
+    # Assemble the Llama-2-chat prompt: a <<SYS>> block, then the past
+    # user/assistant turns, ending with the new user message.
+    texts = [f"<s>[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n"]
+
+    do_strip = False
+    for user_input, response in chat_history:
+        user_input = user_input.strip() if do_strip else user_input
+        do_strip = True
+        texts.append(f"{user_input} [/INST] {response.strip()} </s><s>[INST] ")
+    message = message.strip() if do_strip else message
+    texts.append(f"{message} [/INST]")
+    return ''.join(texts)
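+
+# With one prior exchange, get_prompt returns the standard Llama-2-chat shape:
+#   <s>[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n
+#   {user_1} [/INST] {reply_1} </s><s>[INST] {user_2} [/INST]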
+
+def run(message, chat_history, system_prompt, max_new_tokens=1024, temperature=0.1, top_p=0.9, top_k=50) -> Iterator[str]:
+    # Stream the completion, yielding the accumulated text after every token.
+    prompt = get_prompt(message, chat_history, system_prompt)
+
+    generate_kwargs = dict(
+        max_new_tokens=max_new_tokens,
+        do_sample=True,
+        top_p=top_p,
+        top_k=top_k,
+        temperature=temperature
+    )
+    stream = client.generate_stream(prompt, **generate_kwargs)
+    output = ''
+    for response in stream:
+        # Stop at the first end-of-sequence / end-of-turn marker.
+        if any(end_token in response.token.text for end_token in [EOS_STRING, EOT_STRING]):
+            return output
+        output += response.token.text
+        yield output
+    return output
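+
+# run() is a generator: iterate it to stream partial outputs, e.g.
+#   for partial in run('Hello!', [], DEFAULT_SYSTEM_PROMPT):
+#       print(partial)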
+
+
+DEFAULT_SYSTEM_PROMPT = """
+You are Zephyr, an AI assistant; you are moderately polite and give only true information.
+You carefully provide accurate, factual, thoughtful, nuanced answers, and are brilliant at reasoning.
+If you think there might not be a correct answer, you say so. Since you are autoregressive,
+each token you produce is another opportunity to use computation, so you always spend a few sentences explaining background context,
+assumptions, and step-by-step thinking BEFORE you try to answer a question.
+"""
+MAX_MAX_NEW_TOKENS = 4096
+DEFAULT_MAX_NEW_TOKENS = 1024
+MAX_INPUT_TOKEN_LENGTH = 4096
+
+DESCRIPTION = """
+# Zephyr-7b ChatBot
+"""
+
+def clear_and_save_textbox(message):
+    # Stash the submitted message and clear the textbox.
+    return '', message
+
+def display_input(message, history):
+    # Echo the user's message into the chat with an empty reply slot.
+    # (history is always supplied by the Gradio event; a mutable default
+    # argument here would be a bug.)
+    history.append((message, ''))
+    return history
+
+def delete_prev_fn(history):
+    # Drop the most recent exchange and hand back its user message.
+    try:
+        message, _ = history.pop()
+    except IndexError:
+        message = ''
+    return history, message or ''
+
+def generate(message, history_with_input, system_prompt, max_new_tokens, temperature, top_p, top_k):
+    if max_new_tokens > MAX_MAX_NEW_TOKENS:
+        raise ValueError(f'max_new_tokens must not exceed {MAX_MAX_NEW_TOKENS}')
+
+    # The last entry of history_with_input is the (message, '') placeholder
+    # that display_input just appended.
+    history = history_with_input[:-1]
+    generator = run(message, history, system_prompt, max_new_tokens, temperature, top_p, top_k)
+    try:
+        first_response = next(generator)
+        yield history + [(message, first_response)]
+    except StopIteration:
+        yield history + [(message, '')]
+    for response in generator:
+        yield history + [(message, response)]
+
+def process_example(message):
+    # Run one generation to completion and return only the final history.
+    generator = generate(message, [], DEFAULT_SYSTEM_PROMPT, 1024, 1, 0.95, 50)
+    history = []
+    for history in generator:
+        pass
+    return '', history
+
+def check_input_token_length(message, chat_history, system_prompt):
+    # Rough proxy for token count: characters in the message plus the number
+    # of past turns (not a true tokenizer count).
+    input_token_length = len(message) + len(chat_history)
+    if input_token_length > MAX_INPUT_TOKEN_LENGTH:
+        raise gr.Error(f"The accumulated input is too long ({input_token_length} > {MAX_INPUT_TOKEN_LENGTH}). Clear your chat history and try again.")
+
+with gr.Blocks() as demo:
+    gr.Markdown(DESCRIPTION)
+
+    with gr.Group():
+        chatbot = gr.Chatbot(label='Playground')
+        with gr.Row():
+            textbox = gr.Textbox(
+                container=False,
+                show_label=False,
+                placeholder='Hi, Zephyr',
+                scale=10
+            )
+            submit_button = gr.Button('Submit', variant='primary', scale=1, min_width=0)
+
+    with gr.Row():
+        retry_button = gr.Button('Retry', variant='secondary')
+        undo_button = gr.Button('Undo', variant='secondary')
+        clear_button = gr.Button('Clear', variant='secondary')
+
+    saved_input = gr.State()
+
+    with gr.Accordion(label='Advanced options', open=False):
+        system_prompt = gr.Textbox(label='System prompt', value=DEFAULT_SYSTEM_PROMPT, lines=5, interactive=False)
+        max_new_tokens = gr.Slider(label='Max New Tokens', minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS)
+        temperature = gr.Slider(label='Temperature', minimum=0.1, maximum=4.0, step=0.1, value=0.1)
+        top_p = gr.Slider(label='Top-P (nucleus sampling)', minimum=0.05, maximum=1.0, step=0.05, value=0.9)
+        top_k = gr.Slider(label='Top-K', minimum=1, maximum=1000, step=1, value=10)
+
+    # Submit flow: stash and clear the textbox, echo the message into the
+    # chat, validate input length, then stream the generation.
+    textbox.submit(
+        fn=clear_and_save_textbox,
+        inputs=textbox,
+        outputs=[textbox, saved_input],
+        api_name=False,
+        queue=False,
+    ).then(
+        fn=display_input,
+        inputs=[saved_input, chatbot],
+        outputs=chatbot,
+        api_name=False,
+        queue=False,
+    ).then(
+        fn=check_input_token_length,
+        inputs=[saved_input, chatbot, system_prompt],
+        api_name=False,
+        queue=False,
+    ).success(
+        fn=generate,
+        inputs=[
+            saved_input,
+            chatbot,
+            system_prompt,
+            max_new_tokens,
+            temperature,
+            top_p,
+            top_k,
+        ],
+        outputs=chatbot,
+        api_name=False,
+    )
+
+    # The Submit button mirrors the textbox submit flow above.
+    button_event_preprocess = submit_button.click(
+        fn=clear_and_save_textbox,
+        inputs=textbox,
+        outputs=[textbox, saved_input],
+        api_name=False,
+        queue=False,
+    ).then(
+        fn=display_input,
+        inputs=[saved_input, chatbot],
+        outputs=chatbot,
+        api_name=False,
+        queue=False,
+    ).then(
+        fn=check_input_token_length,
+        inputs=[saved_input, chatbot, system_prompt],
+        api_name=False,
+        queue=False,
+    ).success(
+        fn=generate,
+        inputs=[
+            saved_input,
+            chatbot,
+            system_prompt,
+            max_new_tokens,
+            temperature,
+            top_p,
+            top_k,
+        ],
+        outputs=chatbot,
+        api_name=False,
+    )
+
+    # Retry: drop the last exchange, re-display its message, and regenerate.
+    retry_button.click(
+        fn=delete_prev_fn,
+        inputs=chatbot,
+        outputs=[chatbot, saved_input],
+        api_name=False,
+        queue=False,
+    ).then(
+        fn=display_input,
+        inputs=[saved_input, chatbot],
+        outputs=chatbot,
+        api_name=False,
+        queue=False,
+    ).then(
+        fn=generate,
+        inputs=[
+            saved_input,
+            chatbot,
+            system_prompt,
+            max_new_tokens,
+            temperature,
+            top_p,
+            top_k,
+        ],
+        outputs=chatbot,
+        api_name=False,
+    )
+
+    # Undo: remove the last exchange and put its message back in the textbox.
+    undo_button.click(
+        fn=delete_prev_fn,
+        inputs=chatbot,
+        outputs=[chatbot, saved_input],
+        api_name=False,
+        queue=False,
+    ).then(
+        fn=lambda x: x,
+        inputs=[saved_input],
+        outputs=textbox,
+        api_name=False,
+        queue=False,
+    )
+
+    # Clear: reset both the chat history and the saved input.
+    clear_button.click(
+        fn=lambda: ([], ''),
+        outputs=[chatbot, saved_input],
+        queue=False,
+        api_name=False,
+    )
+
+
+demo.queue(max_size=32).launch(show_api=False)
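
A quick sanity check of the prompt template: calling `get_prompt` with toy inputs (run in the same module, since importing `app.py` would also launch the demo; the messages below are made-up examples):

```python
prompt = get_prompt(
    message="What is 2 + 2?",
    chat_history=[("Hi!", "Hello! How can I help?")],
    system_prompt="You are a helpful assistant.",
)
print(prompt)
# <s>[INST] <<SYS>>
# You are a helpful assistant.
# <</SYS>>
#
# Hi! [/INST] Hello! How can I help? </s><s>[INST] What is 2 + 2? [/INST]
```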