jonathanlehner committed on
Commit
da24cdd
1 Parent(s): cd40a85

added simple gradio

Browse files
Files changed (1) hide show
  1. app.py +13 -1
app.py CHANGED
@@ -1,3 +1,10 @@
 
 
 
 
 
 
 
1
  import gradio as gr
2
  import random
3
 
@@ -10,7 +17,11 @@ def chat(message):
10
  elif message.startswith("Where"):
11
  response = random.choice(["Here", "There", "Somewhere"])
12
  else:
13
- response = "I don't know"
 
 
 
 
14
  history.append((message, response))
15
  gr.set_state(history)
16
  html = "<div class='chatbot'>"
@@ -27,3 +38,4 @@ iface = gr.Interface(chat, "text", "html", css="""
27
  .resp_msg {background-color:lightgray;align-self:self-end}
28
  """, allow_screenshot=False, allow_flagging=False)
29
  iface.launch()
 
 
1
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM
2
+
3
+ tokenizer = AutoTokenizer.from_pretrained("bigscience/T0pp")
4
+ model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0pp")
5
+ #tokenizer = AutoTokenizer.from_pretrained("ethzanalytics/ai-msgbot-gpt2-M")
6
+ #model = AutoModelForCausalLM.from_pretrained("ethzanalytics/ai-msgbot-gpt2-M")
7
+
8
  import gradio as gr
9
  import random
10
 
 
17
  elif message.startswith("Where"):
18
  response = random.choice(["Here", "There", "Somewhere"])
19
  else:
20
+ inputs = tokenizer.encode(message, return_tensors="pt")
21
+ input_len = len(message)
22
+ outputs = model.generate(inputs)
23
+ response = tokenizer.decode(outputs[0])[input_len:]
24
+
25
  history.append((message, response))
26
  gr.set_state(history)
27
  html = "<div class='chatbot'>"
 
38
  .resp_msg {background-color:lightgray;align-self:self-end}
39
  """, allow_screenshot=False, allow_flagging=False)
40
  iface.launch()
41
+