DarwinAnim8or committed
Commit ba171f6
1 Parent(s): d35d614

Update app.py

Files changed (1): app.py +23 -8
app.py CHANGED
@@ -1,5 +1,6 @@
 from huggingface_hub import InferenceClient
 import gradio as gr
+import random
 
 API_URL = "https://api-inference.huggingface.co/models/"
 
@@ -7,7 +8,6 @@ client = InferenceClient(
     "mistralai/Mistral-7B-Instruct-v0.1"
 )
 
-
 def format_prompt(message, history):
     prompt = "<s>"
     for user_prompt, bot_response in history:
@@ -16,9 +16,7 @@ def format_prompt(message, history):
     prompt += f"[INST] {message} [/INST]"
     return prompt
 
-def generate(
-    prompt, history, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
-):
+def generate(prompt, history, temperature=0.9, max_new_tokens=512, top_p=0.95, repetition_penalty=1.0):
     temperature = float(temperature)
     if temperature < 1e-2:
         temperature = 1e-2
@@ -30,7 +28,7 @@ def generate(
         top_p=top_p,
         repetition_penalty=repetition_penalty,
         do_sample=True,
-        seed=42,
+        seed=random.randint(0, 10**7),
     )
 
     formatted_prompt = format_prompt(prompt, history)
@@ -56,9 +54,9 @@ additional_inputs=[
     ),
     gr.Slider(
         label="Max new tokens",
-        value=256,
+        value=512,
         minimum=64,
-        maximum=8000,
+        maximum=1024,
         step=64,
         interactive=True,
         info="The maximum numbers of new tokens",
@@ -83,7 +81,24 @@ additional_inputs=[
     )
 ]
 
-with gr.Blocks() as demo:
+customCSS = """
+.contain {
+    display: flex;
+    flex-direction: column;
+}
+.gradio-container {
+    height: 100vh !important;
+}
+#component-0 {
+    height: 100%;
+}
+#chatbot {
+    flex-grow: 1;
+    overflow: auto;
+}
+"""
+
+with gr.Blocks(css=customCSS) as demo:
     gr.ChatInterface(
         generate,
         additional_inputs=additional_inputs,
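
Net effect of the commit: each request now draws a fresh random seed instead of the fixed seed=42, the token budget changes (default 256 to 512, maximum 8000 to 1024), and custom CSS stretches the chat to fill the viewport. A minimal sketch of the new seeding behavior, assuming the app's generate() passes these kwargs to InferenceClient.text_generation (the call itself is outside the hunks shown above):

import random
from huggingface_hub import InferenceClient

client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.1")

# A fresh seed per call makes repeated generations of the same prompt
# differ; pin a constant seed instead if reproducibility is needed.
output = client.text_generation(
    "<s>[INST] Hello! [/INST]",  # prompt format produced by format_prompt()
    temperature=0.9,
    max_new_tokens=512,
    top_p=0.95,
    repetition_penalty=1.0,
    do_sample=True,
    seed=random.randint(0, 10**7),
)
print(output)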