wop committed on
Commit
bd20bce
1 Parent(s): 028bd27

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -14
app.py CHANGED
@@ -1,30 +1,22 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
- from unsloth.chat_templates import get_chat_template
4
 
5
  # Initialize the InferenceClient with the appropriate model
6
  client = InferenceClient("wop/kosmox")
7
 
8
- # Define the chat template and tokenizer configuration
9
- tokenizer = get_chat_template(
10
- tokenizer=None, # Assuming you need to pass an actual tokenizer here
11
- chat_template="phi-3",
12
- mapping={"role": "from", "content": "value", "user": "human", "assistant": "gpt"},
13
- )
14
-
15
  def format_messages(system_message, history, user_message):
16
  # Create a formatted string according to the specified chat template
17
  formatted_message = "<s>\n"
18
  if system_message:
19
- formatted_message += f"{system_message}\n"
20
 
21
  for user_msg, assistant_msg in history:
22
  if user_msg:
23
- formatted_message += f"{user_msg}\n"
24
  if assistant_msg:
25
- formatted_message += f"{assistant_msg}\n"
26
 
27
- formatted_message += f"{user_message}\n"
28
  return formatted_message
29
 
30
  def respond(
@@ -56,7 +48,7 @@ def respond(
56
  demo = gr.ChatInterface(
57
  fn=respond,
58
  additional_inputs=[
59
- gr.Textbox(value="You are AI.", label="System message"),
60
  gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
61
  gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
62
  gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
@@ -64,4 +56,4 @@ demo = gr.ChatInterface(
64
  )
65
 
66
  if __name__ == "__main__":
67
- demo.launch()
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
 
3
 
4
# Hugging Face Inference API client for the model this Space serves.
client = InferenceClient("wop/kosmox")
7
def format_messages(system_message, history, user_message):
    """Build the phi-3-style prompt string sent to the model.

    Concatenates, in order: the ``<s>`` start token, an optional
    ``<|system|>`` section, each (user, assistant) turn from *history*,
    and finally the new *user_message* as a ``<|user|>`` section.
    Empty/None entries are skipped.
    """
    # Accumulate sections in a list and join once — avoids repeated
    # string concatenation while producing byte-identical output.
    sections = ["<s>\n"]
    if system_message:
        sections.append(f"<|system|>\n{system_message}\n")

    for past_user, past_assistant in history:
        if past_user:
            sections.append(f"<|user|>\n{past_user}\n")
        if past_assistant:
            sections.append(f"<|assistant|>\n{past_assistant}\n")

    sections.append(f"<|user|>\n{user_message}\n")
    return "".join(sections)
21
 
22
  def respond(
 
48
  demo = gr.ChatInterface(
49
  fn=respond,
50
  additional_inputs=[
51
+ gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
52
  gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
53
  gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
54
  gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
 
56
  )
57
 
58
# Launch the Gradio interface only when this file is executed directly,
# not when it is imported as a module.
if __name__ == "__main__":
    demo.launch()