eskayML committed on
Commit
f1d34b7
1 Parent(s): 750cc8c

Update app.py

Files changed (1)
  1. app.py +106 -0
app.py CHANGED
@@ -0,0 +1,106 @@
+ from huggingface_hub import InferenceClient
+ import gradio as gr
+
+ options = ["mistralai/Mixtral-8x7B-Instruct-v0.1"]
+
+ # InferenceClient expects a model id string; a gr.Dropdown is a UI component,
+ # not a model, so take the id directly from the options list.
+ model = options[0]
+ client = InferenceClient(model)
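+
+
+ # format_prompt assembles the Mixtral instruct chat template,
+ # <s>[INST] user [/INST] response</s>, from the persona plus the chat history.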
+ def format_prompt(message, history):
+     prompt = "<s>Your name is Nurse Nkiru, your role is to give patients a diagnosis based on their inputs. The diagnosis given to them should be short and concise, and you generally give further health advice after the diagnosis."
+     for user_prompt, bot_response in history:
+         prompt += f"[INST] {user_prompt} [/INST]"
+         prompt += f" {bot_response}</s> "
+     prompt += f"[INST] {message} [/INST]"
+     return prompt
+
+
+ def generate(
+     prompt, history, system_prompt, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
+ ):
+     # The inference endpoint requires a strictly positive temperature.
+     temperature = float(temperature)
+     if temperature < 1e-2:
+         temperature = 1e-2
+     top_p = float(top_p)
+
+     generate_kwargs = dict(
+         temperature=temperature,
+         max_new_tokens=max_new_tokens,
+         top_p=top_p,
+         repetition_penalty=repetition_penalty,
+         do_sample=True,
+         seed=42,
+     )
+
+     formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
+     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
+     output = ""
+
+     # Yield the accumulated text token by token so the chat UI streams.
+     for response in stream:
+         output += response.token.text
+         yield output
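+
+
+ # Extra controls exposed by the chat UI; they map positionally to generate()'s
+ # system_prompt, temperature, max_new_tokens, top_p and repetition_penalty.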
+ additional_inputs = [
+     gr.Textbox(
+         label="System Prompt",
+         max_lines=1,
+         interactive=True,
+     ),
+     gr.Slider(
+         label="Temperature",
+         value=0.9,
+         minimum=0.0,
+         maximum=1.0,
+         step=0.05,
+         interactive=True,
+         info="Higher values produce more diverse outputs",
+     ),
+     gr.Slider(
+         label="Max new tokens",
+         value=256,
+         minimum=0,
+         maximum=1024,
+         step=64,
+         interactive=True,
+         info="The maximum number of new tokens",
+     ),
+     gr.Slider(
+         label="Top-p (nucleus sampling)",
+         value=0.90,
+         minimum=0.0,
+         maximum=1.0,
+         step=0.05,
+         interactive=True,
+         info="Higher values sample more low-probability tokens",
+     ),
+     gr.Slider(
+         label="Repetition penalty",
+         value=1.2,
+         minimum=1.0,
+         maximum=2.0,
+         step=0.05,
+         interactive=True,
+         info="Penalize repeated tokens",
+     ),
+ ]
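+
+ # Each example row is [message, *additional_inputs]; None leaves the
+ # corresponding control at its default value.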
+ examples = [
+     ["A cement truck fell on my parents, what do I do?", None, None, None, None, None],
+     ["How can I prevent myself from dying from a concussion, if I ever find myself in one?", None, None, None, None, None],
+     ["What nutrition advice do you have for a woman after pregnancy?", None, None, None, None, None],
+ ]
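+
+ # Assemble the streaming chat interface.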
+ gr.ChatInterface(
+     fn=generate,
+     chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
+     additional_inputs=additional_inputs,
+     title="Choose your hero🦸",
+     examples=examples,
+     concurrency_limit=20,
+     theme=gr.themes.Default(primary_hue=gr.themes.colors.blue, secondary_hue=gr.themes.colors.red),
+ ).launch(show_api=False)