2001kaye committed
Commit 76748ec
1 Parent(s): a7ba13a

Update app.py

Files changed (1):
  1. app.py +14 -9
app.py CHANGED
@@ -15,21 +15,26 @@ def respond(
     temperature,
     top_p,
 ):
-    messages = [{"role": "system", "content": system_message}]
+    messages = [{"role": "system", "content": system_message or "You are a friendly Chatbot."}]
 
-    # Add history to messages
+    # Add history to messages, ensuring no None values
     for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
+        user_message = val[0] if val[0] is not None else ""
+        assistant_message = val[1] if val[1] is not None else ""
 
-    # Add the current user message
-    messages.append({"role": "user", "content": message})
+        if user_message:
+            messages.append({"role": "user", "content": user_message})
+        if assistant_message:
+            messages.append({"role": "assistant", "content": assistant_message})
+
+    # Add the current user message, ensure it's not None
+    if message:
+        messages.append({"role": "user", "content": message})
 
     # Generate the response using the Mistral model
     response = llm.create_chat_completion(messages=messages)
-
+    print("response:", response)
+
     return response["choices"][0]["message"]["content"]  # Adjust based on your model's output format
 
 # Set up Gradio Chat Interface
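For context, this is how the patched respond function plausibly fits into the full app.py. This is a minimal runnable sketch, not the actual file: the llama_cpp import, the model path, the exact respond signature, and the ChatInterface wiring are assumptions, since the diff only shows the function body, the llm.create_chat_completion call, and the "# Set up Gradio Chat Interface" comment.

import gradio as gr
from llama_cpp import Llama  # assumed backend, inferred from llm.create_chat_completion

# Hypothetical model path; the commit does not show how llm is constructed.
llm = Llama(model_path="mistral-7b-instruct.Q4_K_M.gguf", n_ctx=4096)

def respond(message, history, system_message, temperature, top_p):
    # Fall back to a default system prompt when none is supplied
    messages = [{"role": "system", "content": system_message or "You are a friendly Chatbot."}]

    # Add history to messages, ensuring no None values
    for val in history:
        user_message = val[0] if val[0] is not None else ""
        assistant_message = val[1] if val[1] is not None else ""

        if user_message:
            messages.append({"role": "user", "content": user_message})
        if assistant_message:
            messages.append({"role": "assistant", "content": assistant_message})

    # Add the current user message, ensure it's not None
    if message:
        messages.append({"role": "user", "content": message})

    # Generate the response using the Mistral model
    response = llm.create_chat_completion(messages=messages)
    print("response:", response)  # debug output added by this commit

    return response["choices"][0]["message"]["content"]  # Adjust based on your model's output format

# Set up Gradio Chat Interface; the extra inputs mirror the parameters
# visible in the hunk header (temperature, top_p) plus the system message
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.05, maximum=1.0, value=0.95, step=0.05, label="Top-p"),
    ],
)

if __name__ == "__main__":
    demo.launch()

Note that temperature and top_p are accepted but never forwarded to the model in the committed code; passing them through (llama-cpp-python's create_chat_completion accepts both as keyword arguments) would be the natural follow-up fix.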