vericudebuget committed
Commit 579a255
1 Parent(s): 743d1bd

Update app.py

Files changed (1)
  1. app.py +47 -42
app.py CHANGED
@@ -5,49 +5,54 @@ from huggingface_hub import InferenceClient
 client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
 
 def format_prompt(message, history):
-    prompt = "<s>"
-    for user_prompt, bot_response in history:
-        prompt += f"[INST] {user_prompt} [/INST]"
-        prompt += f" {bot_response}</s> "
-    prompt += f"[INST] {message} [/INST]"
-    return prompt
+    prompt = "<s>"
+    for user_prompt, bot_response in history:
+        prompt += f"[INST] {user_prompt} [/INST]"
+        prompt += f" {bot_response}</s> "
+    prompt += f"[INST] {message} [/INST]"
+    return prompt
 
 def generate(prompt, history, system_prompt, temperature=0.9, max_new_tokens=9048, top_p=0.95, repetition_penalty=1.0):
-    temperature = max(float(temperature), 1e-2)
-    top_p = float(top_p)
-
-    generate_kwargs = dict(
-        temperature=temperature,
-        max_new_tokens=max_new_tokens,
-        top_p=top_p,
-        repetition_penalty=repetition_penalty,
-        do_sample=True,
-        seed=42,
-    )
-
-    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
-    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
-    output = ""
-
-    for response in stream:
-        output += response.token.text
-        yield output
-    return output
-
-additional_inputs = [
-    gr.Textbox(label="System Prompt", max_lines=1, interactive=True),
-    gr.Slider(label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs"),
-    gr.Slider(label="Max new tokens", value=9048, minimum=256, maximum=9048, step=64, interactive=True, info="The maximum numbers of new tokens"),
-    gr.Slider(label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens"),
-    gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens")
-]
-
+    temperature = max(float(temperature), 1e-2)
+    top_p = float(top_p)
+
+    generate_kwargs = dict(
+        temperature=temperature,
+        max_new_tokens=max_new_tokens,
+        top_p=top_p,
+        repetition_penalty=repetition_penalty,
+        do_sample=True,
+        seed=42,
+    )
+
+    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
+    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
+    output = ""
+
+    for response in stream:
+        output += response.token.text
+        yield output
+    return output
+
+# Define inputs (user interface elements)
+system_prompt_input = gr.Textbox(label="System Prompt", max_lines=1, interactive=True)
+slider_temperature = gr.Slider(label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs")
+slider_max_tokens = gr.Slider(label="Max new tokens", value=9048, minimum=256, maximum=9048, step=64, interactive=True, info="The maximum numbers of new tokens")
+slider_top_p = gr.Slider(label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens")
+slider_repetition_penalty = gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens")
+inputs = [system_prompt_input] + [slider_temperature, slider_max_tokens, slider_top_p, slider_repetition_penalty]
+
+# Define output (generated text)
+output_text = gr.Textbox(label="Output")
+
+# Create the Gradio interface
 gr.Interface(
-    fn=generate,
-    chatbot=gr.Chatbot(show_label=True, show_share_button=True, show_copy_button=True, likeable=True, layout="panel"),
-    additional_inputs=additional_inputs,
-    title="ConvoLite",
-    description="Remember! The AI might give incorrect information about people, locations, history, etc...",
-    concurrency_limit=20,
-    theme=gr.themes.Soft() # Set the theme here
+    fn=generate,
+    chatbot=gr.Chatbot(show_label=True, show_share_button=True, show_copy_button=True, likeable=True, layout="panel"),
+    inputs=inputs,
+    outputs=output_text,
+    title="ConvoLite",
+    description="Remember! The AI might give incorrect information about people, locations, history, etc...",
+    concurrency_limit=20,
+    theme=gr.themes.Soft()
 ).launch(show_api=False,)
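
For reference, format_prompt (unchanged in substance by this commit) builds the Mixtral-8x7B-Instruct chat template: each past turn is wrapped as [INST] user [/INST] response</s>, and the new message is appended as a final [INST] block. A minimal sketch, with the function body taken from the diff above and a hypothetical one-turn history:

def format_prompt(message, history):
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt

# Hypothetical history, purely to show the resulting template string.
history = [("Hi", "Hello! How can I help?")]
print(format_prompt("What is Gradio?", history))
# <s>[INST] Hi [/INST] Hello! How can I help?</s> [INST] What is Gradio? [/INST]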
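
Likewise, a self-contained sketch of the streaming pattern that generate() wraps, using the same huggingface_hub InferenceClient.text_generation call as the diff; the prompt and max_new_tokens values here are illustrative, not taken from the commit:

from huggingface_hub import InferenceClient

client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

# stream=True yields tokens incrementally; details=True wraps each chunk
# so the generated text is available as chunk.token.text.
stream = client.text_generation(
    "<s>[INST] Say hello. [/INST]",  # illustrative prompt
    max_new_tokens=32,               # illustrative value
    stream=True,
    details=True,
    return_full_text=False,
)

output = ""
for chunk in stream:
    output += chunk.token.text  # accumulate streamed token text
print(output)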