RaniRahbani committed on
Commit 76a04a0 · verified · 1 Parent(s): be3f893

Update app.py

Files changed (1)
  1. app.py +51 -23
app.py CHANGED
@@ -2,49 +2,41 @@ import gradio as gr
 from huggingface_hub import InferenceClient
 
 """
-For more information on `huggingface_hub` Inference API support, please check the docs:
-https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
+For more information on huggingface_hub Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
 """
 client = InferenceClient("unsloth/Llama-3.2-1B-Instruct")
 
 
 def respond(
     message,
-    history: list[tuple[str, str]] = None  # Default history as None to avoid mutable issues
+    history: list[tuple[str, str]],
+    # system_message,
+    # max_tokens,
+    # temperature,
+    # top_p,
 ):
-    if history is None:
-        history = []
-
-    # System message describing the assistant's role
-    system_message = (
-        "You are a Dietician Assistant specializing in providing general guidance on diet, "
-        "nutrition, and healthy eating habits. Answer questions thoroughly with scientifically "
-        "backed advice, practical tips, and easy-to-understand explanations. Keep in mind that "
-        "your role is to assist, not replace a registered dietitian, so kindly remind users to "
-        "consult a professional for personalized advice when necessary."
-    )
-
-    # Define model parameters
+
+    system_message = ("You are a Dietician Assistant specializing in providing general guidance on diet, "
+                      "nutrition, and healthy eating habits. Answer questions thoroughly with scientifically "
+                      "backed advice, practical tips, and easy-to-understand explanations. Keep in mind that "
+                      "your role is to assist, not replace a registered dietitian, so kindly remind users to "
+                      "consult a professional for personalized advice when necessary.")
     max_tokens = 512
     temperature = 0.7
     top_p = 0.95
-
-    # Initialize the message history with the system message
+
     messages = [{"role": "system", "content": system_message}]
 
-    # Add previous history to the message chain
     for val in history:
         if val[0]:
             messages.append({"role": "user", "content": val[0]})
         if val[1]:
             messages.append({"role": "assistant", "content": val[1]})
 
-    # Append the new user message
     messages.append({"role": "user", "content": message})
 
     response = ""
 
-    # Generate the response in a streaming fashion
     for message in client.chat_completion(
         messages,
         max_tokens=max_tokens,
@@ -53,10 +45,30 @@ def respond(
         top_p=top_p,
     ):
         token = message.choices[0].delta.content
+
        response += token
         yield response
 
 
+"""
+For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
+"""
+# demo = gr.ChatInterface(
+#     respond,
+#     # additional_inputs=[
+#     #     gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
+#     #     gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
+#     #     gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+#     #     gr.Slider(
+#     #         minimum=0.1,
+#     #         maximum=1.0,
+#     #         value=0.95,
+#     #         step=0.05,
+#     #         label="Top-p (nucleus sampling)",
+#     #     ),
+#     # ],
+# )
+
 def default_message():
     """Function to return initial default message."""
     return [("Hi there! I'm your Dietician Assistant, here to help with general advice "
@@ -65,11 +77,27 @@ def default_message():
 
 # Set up the Gradio ChatInterface with an initial default message
 with gr.Blocks() as demo:
-    chatbot = gr.ChatInterface(respond)
+    chatbot = gr.ChatInterface(
+        respond,
+        # additional_inputs=[
+        #     gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
+        #     gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
+        #     gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+        #     gr.Slider(
+        #         minimum=0.1,
+        #         maximum=1.0,
+        #         value=0.95,
+        #         step=0.05,
+        #         label="Top-p (nucleus sampling)",
+        #     ),
+        # ],
+    )
 
     # Display the default message on load
     gr.State(default_message())  # Store initial chat history
     chatbot.history = default_message()  # Set the chat history to show the greeting
 
+
+
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
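A note on the streaming loop this commit keeps: in huggingface_hub, a stream chunk's delta.content is optional, so a chunk with no text (often the final one) would make response += token raise a TypeError. Below is a minimal defensive sketch, assuming the context lines elided between the hunks pass stream=True to client.chat_completion; renaming the loop variable to chunk is also a hypothetical cleanup, since the current loop shadows the message argument while streaming.

    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        # Skip chunks whose delta carries no text instead of crashing.
        token = chunk.choices[0].delta.content
        if token:
            response += token
        yield response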
 
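Separately, the greeting wired up at the bottom of the new file never reaches the UI: gr.ChatInterface does not expose a history attribute, so chatbot.history = default_message() is a silent no-op, and the bare gr.State(default_message()) call stores a value that nothing reads. One way to seed the greeting, sketched under the assumption of a Gradio release whose gr.ChatInterface accepts a chatbot argument and whose gr.Chatbot still takes the tuple-style history that default_message() returns:

    with gr.Blocks() as demo:
        # Pre-populate the Chatbot component instead of assigning to
        # chatbot.history after construction.
        chatbot = gr.ChatInterface(
            respond,
            chatbot=gr.Chatbot(value=default_message()),
        )

    if __name__ == "__main__":
        demo.launch()

Since ChatInterface is itself a Blocks subclass, the gr.Blocks() wrapper could also be dropped and the interface launched directly.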