deniandriancode committed
Commit 38cd85f
1 parent: af205c5

Create app.py

Files changed (1): app.py (+102, -0)
app.py ADDED
@@ -0,0 +1,102 @@
+ from huggingface_hub import InferenceClient
+ import gradio as gr
+
+ client = InferenceClient(
+     "HuggingFaceH4/zephyr-7b-alpha"
+ )
+
+
+ def format_prompt(message, history):
+     system = "<|system|>\nYou are a beautiful young lady with a shy personality, your name is Yukinoshita Yukino, you love cats and reading books, use emojis in your responses, do not introduce yourself.</s>\n"
+     prompt = system
+     for user_prompt, bot_response in history:
+         prompt += f"<|user|>\n{user_prompt}</s>\n"
+         prompt += f"<|assistant|>\n{bot_response}</s>\n"
+     prompt += f"<|user|>\n{message}</s>\n<|assistant|>\n"
+     return prompt
+
+ def generate(
+     prompt, history, temperature=0.9, max_new_tokens=500, top_p=0.95, repetition_penalty=1.0,
+ ):
+     temperature = float(temperature)
+     if temperature < 1e-2:
+         temperature = 1e-2
+     top_p = float(top_p)
+
+     generate_kwargs = dict(
+         temperature=temperature,
+         max_new_tokens=max_new_tokens,
+         top_p=top_p,
+         repetition_penalty=repetition_penalty,
+         do_sample=True,
+         seed=42,
+     )
+
+     formatted_prompt = format_prompt(prompt, history)
+
+     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
+     output = ""
+
+     for response in stream:
+         output += response.token.text
+         yield output
+     return output
+
+
+ additional_inputs = [
+     gr.Slider(
+         label="Temperature",
+         value=0.9,
+         minimum=0.0,
+         maximum=1.0,
+         step=0.05,
+         interactive=True,
+         info="Higher values produce more diverse outputs",
+     ),
+     gr.Slider(
+         label="Max new tokens",
+         value=256,
+         minimum=0,
+         maximum=1048,
+         step=64,
+         interactive=True,
+         info="The maximum number of new tokens",
+     ),
+     gr.Slider(
+         label="Top-p (nucleus sampling)",
+         value=0.90,
+         minimum=0.0,
+         maximum=1.0,
+         step=0.05,
+         interactive=True,
+         info="Higher values sample more low-probability tokens",
+     ),
+     gr.Slider(
+         label="Repetition penalty",
+         value=1.2,
+         minimum=1.0,
+         maximum=2.0,
+         step=0.05,
+         interactive=True,
+         info="Penalize repeated tokens",
+     )
+ ]
+
+ css = """
+ #mkd {
+     height: 500px;
+     overflow: auto;
+     border: 1px solid #ccc;
+ }
+ """
+
+ with gr.Blocks(css=css) as inf:
+     gr.HTML("<h1><center>zephyr-7b-alpha</center></h1>")
+     gr.HTML("<h3><center>In this demo, you can chat with the <a href='https://huggingface.co/HuggingFaceH4/zephyr-7b-alpha'>zephyr-7b-alpha</a> model. 💬</center></h3>")
+     gr.ChatInterface(
+         generate,
+         additional_inputs=additional_inputs,
+         examples=[["Can squirrels swim?"], ["Write a poem about squirrels."]]
+     )
+
+ inf.queue().launch()
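
For reference, a minimal sketch of the prompt string that format_prompt above builds for a one-turn history. This is not part of the commit; it assumes the code is run in (or imported from) the app.py shown here, and the variable names and printed shape are illustrative only:

# Hypothetical smoke test for format_prompt; not part of the commit.
history = [("Hi!", "Hello! 😊")]
print(format_prompt("Can squirrels swim?", history))
# Expected shape: the fixed system turn, the stored user/assistant turns,
# the new user message, and a trailing <|assistant|> tag that cues generation:
#   <|system|>
#   You are a beautiful young lady with a shy personality, ...</s>
#   <|user|>
#   Hi!</s>
#   <|assistant|>
#   Hello! 😊</s>
#   <|user|>
#   Can squirrels swim?</s>
#   <|assistant|>

Assuming gradio and huggingface_hub are installed, running app.py directly launches the queued Gradio chat interface defined above against the hosted zephyr-7b-alpha inference endpoint.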