ysharma HF staff committed on
Commit
be8276a
1 Parent(s): e4608ee

replaced together demo with chatglm

Browse files
Files changed (1) hide show
  1. app.py +65 -16
app.py CHANGED
@@ -4,6 +4,17 @@ import requests
4
  import os
5
  from text_generation import Client, InferenceAPIClient
6
 
 
 
 
 
 
 
 
 
 
 
 
7
  #Streaming endpoint for OPENAI ChatGPT
8
  API_URL = "https://api.openai.com/v1/chat/completions"
9
  #Streaming endpoint for OPENCHATKIT
@@ -162,6 +173,33 @@ def predict_together(model: str,
162
  ]
163
  yield chat, history
164
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
165
 
166
  def reset_textbox():
167
  return gr.update(value="")
@@ -173,7 +211,8 @@ def reset_chat(chatbot, state):
173
  return None, []
174
 
175
 
176
- title = """<h1 align="center">🔥🔥Comparison: ChatGPT & OpenChatKit </h1><br><h3 align="center">🚀A Gradio Streaming Demo</h3><br>Official Demo: <a href="https://huggingface.co/spaces/togethercomputer/OpenChatKit">OpenChatKit feedback app</a>"""
 
177
  description = """Language models can be conditioned to act like dialogue agents through a conversational prompt that typically takes the form:
178
  ```
179
  User: <utterance>
@@ -187,6 +226,7 @@ In this app, you can explore the outputs of multiple LLMs when prompted in simil
187
 
188
  with gr.Blocks(css="""#col_container {width: 1000px; margin-left: auto; margin-right: auto;}
189
  #chatgpt {height: 520px; overflow: auto;}
 
190
  #chattogether {height: 520px; overflow: auto;} """ ) as demo:
191
  #clear {width: 100px; height:50px; font-size:12px}""") as demo:
192
  gr.HTML(title)
@@ -201,21 +241,23 @@ with gr.Blocks(css="""#col_container {width: 1000px; margin-left: auto; margin-r
201
  b1 = gr.Button('🏃Run', elem_id = 'run').style(full_width=True)
202
  b2 = gr.Button('🔄Clear up Chatbots!', elem_id = 'clear').style(full_width=True)
203
  state_chatgpt = gr.State([])
204
- state_together = gr.State([])
 
205
 
206
  with gr.Box():
207
  with gr.Row():
208
  chatbot_chatgpt = gr.Chatbot(elem_id="chatgpt", label='ChatGPT API - OPENAI')
209
- chatbot_together = gr.Chatbot(elem_id="chattogether", label='OpenChatKit - Text Generation')
 
210
 
211
  with gr.Column(scale=2, elem_id='parameters'):
212
  with gr.Box():
213
- gr.HTML("Parameters for #OpenCHAtKit")
214
- top_p = gr.Slider(minimum=-0, maximum=1.0,value=0.25, step=0.05,interactive=True, label="Top-p",)
215
- temperature = gr.Slider(minimum=-0, maximum=5.0, value=0.6, step=0.1, interactive=True, label="Temperature", )
216
- top_k = gr.Slider( minimum=1, maximum=50, value=50, step=1, interactive=True, label="Top-k",)
217
- repetition_penalty = gr.Slider( minimum=0.1, maximum=3.0, value=1.01, step=0.01, interactive=True, label="Repetition Penalty",)
218
- watermark = gr.Checkbox(value=True, label="Text watermarking")
219
  model = gr.CheckboxGroup(value="Rallio67/joi2_20B_instruct_alpha",
220
  choices=["togethercomputer/GPT-NeoXT-Chat-Base-20B", "Rallio67/joi2_20B_instruct_alpha", "google/flan-t5-xxl", "google/flan-ul2", "bigscience/bloomz", "EleutherAI/gpt-neox-20b",],
221
  label="Model",visible=False,)
@@ -232,18 +274,25 @@ with gr.Blocks(css="""#col_container {width: 1000px; margin-left: auto; margin-r
232
  inputs.submit( predict_chatgpt,
233
  [inputs, top_p_chatgpt, temperature_chatgpt, openai_api_key, chat_counter_chatgpt, chatbot_chatgpt, state_chatgpt],
234
  [chatbot_chatgpt, state_chatgpt, chat_counter_chatgpt],)
235
- inputs.submit( predict_together,
236
- [temp_textbox_together, inputs, top_p, temperature, top_k, repetition_penalty, watermark, chatbot_together, state_together, ],
237
- [chatbot_together, state_together],)
 
 
 
238
  b1.click( predict_chatgpt,
239
  [inputs, top_p_chatgpt, temperature_chatgpt, openai_api_key, chat_counter_chatgpt, chatbot_chatgpt, state_chatgpt],
240
  [chatbot_chatgpt, state_chatgpt, chat_counter_chatgpt],)
241
- b1.click( predict_together,
242
- [temp_textbox_together, inputs, top_p, temperature, top_k, repetition_penalty, watermark, chatbot_together, state_together, ],
243
- [chatbot_together, state_together],)
 
 
 
244
 
245
  b2.click(reset_chat, [chatbot_chatgpt, state_chatgpt], [chatbot_chatgpt, state_chatgpt])
246
- b2.click(reset_chat, [chatbot_together, state_together], [chatbot_together, state_together])
 
247
 
248
  gr.HTML('''<center><a href="https://huggingface.co/spaces/ysharma/OpenChatKit_ChatGPT_Comparison?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>Duplicate the Space and run securely with your OpenAI API Key</center>''')
249
  gr.Markdown(description)
 
4
  import os
5
  from text_generation import Client, InferenceAPIClient
6
 
7
# --- ChatGLM-6B (THUDM) chat model ---
# NOTE(review): trust_remote_code=True executes code from the model repo at
# load time — acceptable for a pinned demo, but worth pinning a revision.
from transformers import AutoModel, AutoTokenizer
tokenizer_glm = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
# fp16 on GPU; eval() switches off training-time behavior (dropout, etc.).
model_glm = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
model_glm = model_glm.eval()

# --- M2M100 Chinese -> English translator ---
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
model_chtoen = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
tokenizer_chtoen = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
17
+
18
  #Streaming endpoint for OPENAI ChatGPT
19
  API_URL = "https://api.openai.com/v1/chat/completions"
20
  #Streaming endpoint for OPENCHATKIT
 
173
  ]
174
  yield chat, history
175
 
176
# Run one ChatGLM turn and return the (translated) chat history.
def predict_glm(input, history=None):
    """Generate a ChatGLM-6B response and update the conversation history.

    Parameters:
        input: the user's new utterance (name shadows the builtin, kept
            for backward compatibility with existing callers).
        history: prior (query, response) pairs, or None for a fresh chat.
            BUGFIX: was a mutable default ``[]``, which is created once and
            shared across every call — state would leak between sessions.

    Returns:
        (history, history) — the same list twice, matching the Gradio
        (chatbot, state) output signature.
    """
    if history is None:
        history = []
    response, history = model_glm.chat(tokenizer_glm, input, history)
    # ChatGLM may answer in Chinese; translate responses so the chatbot
    # always shows English.
    # NOTE(review): this re-translates the entire history every turn
    # (O(n) translator calls per turn); translating only the newest
    # response would be cheaper — left as-is to preserve behavior.
    history = [(query, translate_Chinese_English(resp)) for query, resp in history]
    return history, history
182
+
183
def translate_Chinese_English(chinese_text):
    """Translate *chinese_text* to English using the M2M100 model."""
    # The tokenizer must know the source language before encoding.
    tokenizer_chtoen.src_lang = "zh"
    encoded = tokenizer_chtoen(chinese_text, return_tensors="pt")
    # Force the decoder to begin generating in English.
    english_id = tokenizer_chtoen.get_lang_id("en")
    output_ids = model_chtoen.generate(**encoded, forced_bos_token_id=english_id)
    decoded = tokenizer_chtoen.batch_decode(output_ids, skip_special_tokens=True)
    return decoded[0]
190
+
191
+
192
+ with gr.Blocks() as demo:
193
+ chatbot = gr.Chatbot()
194
+ state = gr.State([])
195
+
196
+ with gr.Row():
197
+ txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter").style(container=False)
198
+
199
+ txt.submit(predict, [txt, state], [chatbot, state])
200
+
201
+ demo.launch(debug=True)
202
+
203
 
204
def reset_textbox():
    """Return a Gradio update that clears the shared input textbox."""
    cleared = gr.update(value="")
    return cleared
 
211
  return None, []
212
 
213
 
214
+ #title = """<h1 align="center">🔥🔥Comparison: ChatGPT & OpenChatKit </h1><br><h3 align="center">🚀A Gradio Streaming Demo</h3><br>Official Demo: <a href="https://huggingface.co/spaces/togethercomputer/OpenChatKit">OpenChatKit feedback app</a>"""
215
+ title = """<h1 align="center">🔥🔥Comparison: ChatGPT & Open Sourced CHatGLM-6B </h1><br><h3 align="center">🚀A Gradio Chatbot Demo</h3>"""
216
  description = """Language models can be conditioned to act like dialogue agents through a conversational prompt that typically takes the form:
217
  ```
218
  User: <utterance>
 
226
 
227
  with gr.Blocks(css="""#col_container {width: 1000px; margin-left: auto; margin-right: auto;}
228
  #chatgpt {height: 520px; overflow: auto;}
229
+ #chatglm {height: 520px; overflow: auto;} """ ) as demo:
230
  #chattogether {height: 520px; overflow: auto;} """ ) as demo:
231
  #clear {width: 100px; height:50px; font-size:12px}""") as demo:
232
  gr.HTML(title)
 
241
  b1 = gr.Button('🏃Run', elem_id = 'run').style(full_width=True)
242
  b2 = gr.Button('🔄Clear up Chatbots!', elem_id = 'clear').style(full_width=True)
243
  state_chatgpt = gr.State([])
244
+ #state_together = gr.State([])
245
+ state_glm = gr.State([])
246
 
247
  with gr.Box():
248
  with gr.Row():
249
  chatbot_chatgpt = gr.Chatbot(elem_id="chatgpt", label='ChatGPT API - OPENAI')
250
+ #chatbot_together = gr.Chatbot(elem_id="chattogether", label='OpenChatKit - Text Generation')
251
+ chatbot_glm = gr.Chatbot(elem_id="chatglm", label='THUDM-ChatGLM6B')
252
 
253
  with gr.Column(scale=2, elem_id='parameters'):
254
  with gr.Box():
255
+ gr.HTML("Parameters for #OpenCHAtKit", visible=False)
256
+ top_p = gr.Slider(minimum=-0, maximum=1.0,value=0.25, step=0.05,interactive=True, label="Top-p", visible=False)
257
+ temperature = gr.Slider(minimum=-0, maximum=5.0, value=0.6, step=0.1, interactive=True, label="Temperature", visible=False)
258
+ top_k = gr.Slider( minimum=1, maximum=50, value=50, step=1, interactive=True, label="Top-k", visible=False)
259
+ repetition_penalty = gr.Slider( minimum=0.1, maximum=3.0, value=1.01, step=0.01, interactive=True, label="Repetition Penalty", visible=False)
260
+ watermark = gr.Checkbox(value=True, label="Text watermarking", visible=False)
261
  model = gr.CheckboxGroup(value="Rallio67/joi2_20B_instruct_alpha",
262
  choices=["togethercomputer/GPT-NeoXT-Chat-Base-20B", "Rallio67/joi2_20B_instruct_alpha", "google/flan-t5-xxl", "google/flan-ul2", "bigscience/bloomz", "EleutherAI/gpt-neox-20b",],
263
  label="Model",visible=False,)
 
274
  inputs.submit( predict_chatgpt,
275
  [inputs, top_p_chatgpt, temperature_chatgpt, openai_api_key, chat_counter_chatgpt, chatbot_chatgpt, state_chatgpt],
276
  [chatbot_chatgpt, state_chatgpt, chat_counter_chatgpt],)
277
+ #inputs.submit( predict_together,
278
+ # [temp_textbox_together, inputs, top_p, temperature, top_k, repetition_penalty, watermark, chatbot_together, state_together, ],
279
+ # [chatbot_together, state_together],)
280
+ inputs.submit( predict_glm,
281
+ [inputs, state_glm, ],
282
+ [chatbot_glm, state_glm],)
283
  b1.click( predict_chatgpt,
284
  [inputs, top_p_chatgpt, temperature_chatgpt, openai_api_key, chat_counter_chatgpt, chatbot_chatgpt, state_chatgpt],
285
  [chatbot_chatgpt, state_chatgpt, chat_counter_chatgpt],)
286
+ #b1.click( predict_together,
287
+ # [temp_textbox_together, inputs, top_p, temperature, top_k, repetition_penalty, watermark, chatbot_together, state_together, ],
288
+ # [chatbot_together, state_together],)
289
+ b1.click( predict_glm,
290
+ [inputs, state_glm, ],
291
+ [chatbot_glm, state_glm],)
292
 
293
  b2.click(reset_chat, [chatbot_chatgpt, state_chatgpt], [chatbot_chatgpt, state_chatgpt])
294
+ #b2.click(reset_chat, [chatbot_together, state_together], [chatbot_together, state_together])
295
+ b2.click(reset_chat, [chatbot_glm, state_glm], [chatbot_glm, state_glm])
296
 
297
  gr.HTML('''<center><a href="https://huggingface.co/spaces/ysharma/OpenChatKit_ChatGPT_Comparison?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>Duplicate the Space and run securely with your OpenAI API Key</center>''')
298
  gr.Markdown(description)