ThomasSimonini HF staff committed on
Commit
b5897ad
1 Parent(s): 97a8eec

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +51 -9
app.py CHANGED
@@ -1,6 +1,8 @@
1
  import gradio as gr
2
  from gradio.inputs import Textbox, Slider
3
 
 
 
4
  # Template
5
  title = "A conversation with some NPC in a Tavern 🍻"
6
  description = ""
@@ -16,29 +18,69 @@ article = """
16
  <img src='http://www.simoninithomas.com/test/gandalf.jpg', alt="Gandalf"/>"""
17
  theme="huggingface"
18
 
 
 
19
 
20
 
 
 
 
 
 
 
 
 
 
 
21
 
22
-
23
- #examples = [[0.9, 1.1, 50, "Hey Gandalf! How are you?"], [0.9, 1.1, 50, "Hey Gandalf, why you didn't use the great eagles to fly Frodo to Mordor?"]]
24
-
 
 
 
25
 
26
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
 
28
- def generate_text():
29
- pass
30
 
31
 
32
 
33
 
34
- io = gr.Interface.load("huggingface/EleutherAI/gpt-j-6B")
35
 
36
- iface = gr.Interface(fn=generate_text,
37
  inputs=[Textbox(label="Prompt"),
38
  Slider(minimum=0.5, maximum=1, step=0.05, default=0.9, label="top_p"),
39
  Slider(minimum=0.5, maximum=1.5, step=0.1, default=1.1, label="temperature"),
40
- Slider(minimum=20, maximum=250, step=10, default=50, label="max_new_tokens")],
41
- outputs="text",
 
 
 
42
  #examples="",
43
  allow_screenshot=True,
44
  allow_flagging=True,
 
1
  import gradio as gr
2
  from gradio.inputs import Textbox, Slider
3
 
4
+ import requests
5
+
6
  # Template
7
  title = "A conversation with some NPC in a Tavern 🍻"
8
  description = ""
 
18
  <img src='http://www.simoninithomas.com/test/gandalf.jpg', alt="Gandalf"/>"""
19
  theme="huggingface"
20
 
21
# NOTE(review): `Prompt` is not defined anywhere in this view — this line raises
# NameError at import time unless `Prompt` is assigned in code outside this diff.
# Presumably it should be the scene-setting string fed to build_prompt(); confirm.
context=Prompt
# Speaker labels used when rendering each chat turn: [user label, NPC label].
interlocutor_names = ["Human", "Gandalf"]
23
 
24
 
25
# Builds the prompt from what previously happened
def build_prompt(conversation, context):
    """Assemble the text prompt sent to the language model.

    Parameters:
        conversation: iterable of (user_msg, resp_msg) pairs, oldest first.
        context: scene-setting string prepended before the dialogue.

    Returns:
        The full prompt string: context, then each turn rendered as
        "\\n- <Name>:<message>" using the module-level interlocutor_names
        labels ([0] for the user, [1] for the NPC).
    """
    # Collect pieces and join once instead of repeated += string concatenation;
    # the original's trailing `prompt += ""` was a no-op and is dropped.
    pieces = [context + "\n"]
    for user_msg, resp_msg in conversation:
        pieces.append("\n- " + interlocutor_names[0] + ":" + user_msg)
        pieces.append("\n- " + interlocutor_names[1] + ":" + resp_msg)
    return "".join(pieces)
35
 
36
# Recognize what the model said, if it used the correct format
def clean_chat_output(txt, prompt):
    """Extract the NPC's reply from the raw model output.

    Removes the echoed prompt, then cuts the text at the point where the
    model starts writing the human's next line ("\\n- <user label>").

    Parameters:
        txt: raw generated text (may include the prompt and extra turns).
        prompt: the exact prompt string that was sent to the model.

    Returns:
        The NPC's reply. If the user-turn delimiter is absent, the whole
        remainder is returned — previously str.find returned -1 and the
        slice output[:-1] silently dropped the reply's last character.
    """
    delimiter = "\n- " + interlocutor_names[0]
    output = txt.replace(prompt, '')
    cut = output.find(delimiter)
    if cut != -1:
        output = output[:cut]
    return output
42
 
43
 
44
def chat(top_p, temperature, max_new_tokens, message):
    """Handle one chat turn: update session history, query the model, return the reply.

    Parameters:
        top_p: nucleus-sampling cutoff forwarded to the model.
        temperature: sampling temperature forwarded to the model.
        max_new_tokens: generation length cap forwarded to the model.
        message: the user's new message.

    Returns:
        (response, history) — the NPC's reply string and the full list of
        (user_msg, resp_msg) turns, as expected by the chatbot + state outputs.
    """
    # Session history via Gradio's legacy global-state API; empty list on first call.
    history = gr.get_state() or []
    # Append the new turn with an empty reply placeholder so build_prompt
    # renders the user's line followed by the NPC label awaiting completion.
    history.append((message, ""))
    gr.set_state(history)
    conversation = history
    prompt = build_prompt(conversation, context)

    # Build JSON
    json_ = {"inputs": prompt,
    "parameters":
    {
    "top_p": top_p,
    "temperature": temperature,
    "max_new_tokens": max_new_tokens,
    # Ask the API to return only the continuation, not the echoed prompt.
    "return_full_text": False
    }}

    # NOTE(review): `query` is not defined anywhere in this view — presumably a
    # helper that POSTs json_ to the HF Inference API via `requests`; confirm.
    output = query(json_)
    # Assumes the API's list-of-dicts response shape — TODO confirm.
    output = output[0]['generated_text']
    answer = clean_chat_output(output, prompt)
    response = answer
    # Replace the placeholder turn with the real reply, then persist it.
    history[-1] = (message, response)
    gr.set_state(history)
    return response, history
68
 
 
 
69
 
70
 
71
 
72
 
73
+ #io = gr.Interface.load("huggingface/EleutherAI/gpt-j-6B")
74
 
75
+ iface = gr.Interface(fn=chat,
76
  inputs=[Textbox(label="Prompt"),
77
  Slider(minimum=0.5, maximum=1, step=0.05, default=0.9, label="top_p"),
78
  Slider(minimum=0.5, maximum=1.5, step=0.1, default=1.1, label="temperature"),
79
+ Slider(minimum=20, maximum=250, step=10, default=50, label="max_new_tokens"),
80
+ "text",
81
+ "state"],
82
+
83
+ outputs=["chatbot","state"],
84
  #examples="",
85
  allow_screenshot=True,
86
  allow_flagging=True,