Tijmen2 committed on
Commit
fe25716
·
verified ·
1 Parent(s): ca35e53

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -24
app.py CHANGED
@@ -32,41 +32,46 @@ GREETING_MESSAGES = [
32
def get_random_greeting():
    """Pick one welcome line uniformly at random from GREETING_MESSAGES."""
    greeting = random.choice(GREETING_MESSAGES)
    return greeting
34
 
35
# Function to handle the chat response with streaming.
def respond_stream(message, history):
    """Yield a progressively growing assistant reply for *message*.

    Parameters
    ----------
    message : str
        The new user message. Empty/None input produces no output.
    history : list[tuple[str, str]]
        Prior (user, assistant) turns; either element may be falsy.

    Yields
    ------
    str
        The accumulated response text so far (one yield per content
        chunk), or a single error string if generation fails.
    """
    if not message:  # nothing to answer — avoids a pointless LLM call
        return

    # System persona, then the replayed conversation, then the new message.
    system_message = (
        "You are AstroSage, a highly knowledgeable AI assistant specialized "
        "in astronomy, astrophysics, and cosmology. Provide accurate, "
        "engaging, and educational responses about space science and the "
        "universe."
    )
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    try:
        # Stream response from the LLM.
        stream = llm.create_chat_completion(
            messages=messages,
            max_tokens=512,
            temperature=0.7,
            top_p=0.9,
            stream=True,  # enable incremental chunks
        )
        response_content = ""
        for chunk in stream:
            # Bug fix: the first streamed chunk's delta typically holds only
            # {"role": ...}; indexing ["content"] unconditionally raised
            # KeyError. Use .get() and skip empty pieces.
            piece = chunk["choices"][0]["delta"].get("content")
            if piece:
                response_content += piece
                yield response_content
    except Exception as e:
        # Surface the failure in the chat window instead of crashing the app.
        yield f"Error: {e}"
 
64
 
65
# Using gr.ChatInterface for a simpler chat UI
# NOTE(review): with type="messages", gradio passes `history` to the callback
# as a list of {"role", "content"} dicts, not (user, assistant) tuples — the
# tuple unpacking inside respond_stream would then fail; confirm against the
# installed gradio version.
chatbot = gr.ChatInterface(fn=respond_stream, type="messages")

# Set a welcome message
# NOTE(review): gr.ChatInterface does not document a set_welcome_message()
# method — verify this call does not raise AttributeError at startup.
chatbot.set_welcome_message(get_random_greeting())

# Launch only when run as a script, not on import.
if __name__ == "__main__":
    chatbot.launch()
 
 
 
 
 
32
def get_random_greeting():
    """Return a randomly chosen greeting from the module-level pool."""
    pool = GREETING_MESSAGES
    return random.choice(pool)
34
 
 
35
def respond_stream(message, history):
    """Stream an AstroSage reply for *message*, yielding the text so far.

    Parameters
    ----------
    message : str
        New user input; empty input yields nothing.
    history : list[tuple[str, str | None]]
        Prior (user, assistant) pairs; the assistant slot may be falsy
        for a turn still in flight.

    Yields
    ------
    str
        Accumulated reply after each content chunk, or one error string
        if generation fails.
    """
    if not message:  # Handle empty messages
        return

    # Bug fix: the system prompt had been left as a truncated placeholder
    # ("...  # (your system message)"); restore the full persona prompt.
    system_message = (
        "You are AstroSage, a highly knowledgeable AI assistant specialized "
        "in astronomy, astrophysics, and cosmology. Provide accurate, "
        "engaging, and educational responses about space science and the "
        "universe."
    )
    messages = [{"role": "system", "content": system_message}]

    # Format history correctly (especially important if you use clear).
    for user, assistant in history:
        messages.append({"role": "user", "content": user})
        if assistant:  # assistant slot may be empty for a pending turn
            messages.append({"role": "assistant", "content": assistant})

    messages.append({"role": "user", "content": message})

    try:
        response_content = ""
        for chunk in llm.create_chat_completion(
            messages=messages,
            max_tokens=512,
            temperature=0.7,
            top_p=0.9,
            stream=True,
        ):
            delta = chunk["choices"][0]["delta"]
            # Bug fix: `"content" in delta` still admitted a None value
            # (TypeError on +=), and yielding outside the guard re-emitted
            # unchanged text for role-only chunks. Yield only on new content.
            piece = delta.get("content")
            if piece:
                response_content += piece
                yield response_content  # yield inside the loop for streaming
    except Exception as e:
        yield f"Error during generation: {e}"
66
 
 
 
67
 
68
# Use gr.Chatbot inside a Blocks app for streaming and history management.
# NOTE(review): gr.Chatbot exposes no set_welcome_message() method (the old
# call would raise AttributeError); seed the greeting through the component's
# initial value instead — confirm against the installed gradio version.
chatbot = gr.Chatbot(value=[(None, get_random_greeting())])

with gr.Blocks() as demo:
    chatbot.render()
    clear = gr.Button("Clear")
    # Bug fix: the callback was passed twice — positionally (lambda: None)
    # and as the keyword fn=lambda: [] — a guaranteed TypeError("got
    # multiple values for argument 'fn'"). A single callback returning an
    # empty history clears the chatbot.
    clear.click(lambda: [], None, chatbot)

# Bug fix: queue()/launch() belong to the Blocks app, not the Chatbot
# component; also guard launch so importing this module has no side effects.
if __name__ == "__main__":
    demo.queue().launch()