Update app.py
Browse files
app.py
CHANGED
@@ -4,14 +4,30 @@ from llama_cpp import Llama
|
|
4 |
from huggingface_hub import hf_hub_download
|
5 |
import random
|
6 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
7 |
@spaces.GPU
|
8 |
-
def
|
|
|
9 |
|
10 |
-
model_path = hf_hub_download(
|
11 |
-
repo_id="AstroMLab/AstroSage-8B-GGUF",
|
12 |
-
filename="AstroSage-8B-Q8_0.gguf"
|
13 |
-
)
|
14 |
-
|
15 |
llm = Llama(
|
16 |
model_path=model_path,
|
17 |
n_ctx=2048,
|
@@ -20,147 +36,129 @@ def main():
|
|
20 |
flash_attn=True,
|
21 |
)
|
22 |
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
|
|
|
|
|
|
29 |
]
|
30 |
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
history = []
|
35 |
-
return "", history + [{"role": "user", "content": user_message}]
|
36 |
|
37 |
-
|
38 |
-
|
39 |
-
if not history:
|
40 |
-
history = []
|
41 |
-
|
42 |
-
# Prepare the messages for the model
|
43 |
-
messages = [
|
44 |
-
{
|
45 |
-
"role": "system",
|
46 |
-
"content": "You are AstroSage, an intelligent AI assistant specializing in astronomy, astrophysics, and space science. You provide accurate, scientific information while making complex concepts accessible. You're enthusiastic about space exploration and maintain a sense of wonder about the cosmos."
|
47 |
-
}
|
48 |
-
]
|
49 |
-
|
50 |
-
# Add chat history
|
51 |
-
for message in history[:-1]: # Exclude the last message which we just added
|
52 |
-
messages.append({"role": message["role"], "content": message["content"]})
|
53 |
-
|
54 |
-
# Add the current user message
|
55 |
-
messages.append({"role": "user", "content": history[-1]["content"]})
|
56 |
-
|
57 |
-
# Start generating the response
|
58 |
-
history.append({"role": "assistant", "content": ""})
|
59 |
-
|
60 |
-
# Stream the response
|
61 |
-
response = llm.create_chat_completion(
|
62 |
-
messages=messages,
|
63 |
-
max_tokens=512,
|
64 |
-
temperature=0.7,
|
65 |
-
top_p=0.95,
|
66 |
-
stream=True,
|
67 |
-
)
|
68 |
-
|
69 |
-
for chunk in response:
|
70 |
-
if chunk and "content" in chunk["choices"][0]["delta"]:
|
71 |
-
history[-1]["content"] += chunk["choices"][0]["delta"]["content"]
|
72 |
-
yield history
|
73 |
|
74 |
-
|
75 |
-
|
76 |
-
return [{"role": "assistant", "content": random.choice(GREETING_MESSAGES)}]
|
77 |
|
78 |
-
#
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
|
83 |
-
|
84 |
-
|
85 |
-
|
86 |
-
background-color: #0f0f1a;
|
87 |
-
}
|
88 |
-
.contain {
|
89 |
-
max-width: 1200px !important;
|
90 |
-
}
|
91 |
-
"""
|
92 |
|
93 |
-
|
94 |
-
|
95 |
-
|
96 |
-
|
97 |
-
|
98 |
-
|
99 |
-
|
100 |
-
|
101 |
-
|
102 |
-
|
103 |
-
|
104 |
-
|
105 |
-
|
106 |
-
|
107 |
-
|
108 |
-
|
109 |
-
|
110 |
-
|
111 |
-
|
112 |
-
|
113 |
-
|
114 |
-
|
115 |
-
|
116 |
-
|
117 |
-
|
118 |
-
|
119 |
-
|
120 |
-
|
|
|
121 |
|
122 |
-
|
123 |
-
|
124 |
-
label="Type your message here",
|
125 |
-
placeholder="Ask me anything about space and astronomy...",
|
126 |
-
scale=9
|
127 |
-
)
|
128 |
-
clear = gr.Button("Clear Chat", scale=1)
|
129 |
|
130 |
-
|
131 |
-
|
132 |
-
|
133 |
-
|
134 |
-
|
135 |
-
|
136 |
-
|
137 |
-
"What is dark matter and why is it important?"
|
138 |
-
],
|
139 |
-
inputs=msg,
|
140 |
-
label="Example Questions"
|
141 |
-
)
|
142 |
|
143 |
-
|
144 |
-
|
145 |
-
|
146 |
-
|
147 |
-
|
148 |
-
|
149 |
-
|
150 |
-
|
151 |
-
|
152 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
153 |
)
|
154 |
-
|
155 |
-
|
156 |
-
|
157 |
-
|
158 |
-
|
159 |
-
|
160 |
-
|
161 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
162 |
|
163 |
# Launch the app
|
164 |
if __name__ == "__main__":
|
165 |
-
|
166 |
|
|
|
4 |
from huggingface_hub import hf_hub_download
|
5 |
import random
|
6 |
|
7 |
+
# Model weights location on the Hugging Face Hub.
_REPO_ID = "AstroMLab/AstroSage-8B-GGUF"
_MODEL_FILE = "AstroSage-8B-Q8_0.gguf"

# Resolve the quantized AstroSage-8B weights once at import time;
# hf_hub_download caches the file locally and returns its filesystem path.
model_path = hf_hub_download(repo_id=_REPO_ID, filename=_MODEL_FILE)
|
11 |
+
|
12 |
+
|
13 |
+
# Canned conversation openers. One of these is chosen at random (see
# initial_greeting) to seed the chat window when a session starts and
# there is no prior context.
GREETING_MESSAGES = [
    "Greetings! I am AstroSage, your guide to the cosmos. What would you like to explore today?",
    "Welcome to our cosmic journey! I am AstroSage. How may I assist you in understanding the universe?",
    "AstroSage here. Ready to explore the mysteries of space and time. How may I be of assistance?",
    "The universe awaits! I'm AstroSage. What astronomical wonders shall we discuss?",
]
|
20 |
+
|
21 |
+
def user(user_message, history):
    """Record the user's message in the chat history.

    Returns a pair for Gradio's (textbox, chatbot) outputs: an empty
    string that clears the input box, and a NEW history list extended
    with the message in OpenAI-style {"role", "content"} form. The
    incoming history list is never mutated.
    """
    prior = history if history is not None else []
    return "", prior + [{"role": "user", "content": user_message}]
|
26 |
+
|
27 |
@spaces.GPU
|
28 |
+
def bot(history):
|
29 |
+
"""Initialize the bot and stream the response."""
|
30 |
|
|
|
|
|
|
|
|
|
|
|
31 |
llm = Llama(
|
32 |
model_path=model_path,
|
33 |
n_ctx=2048,
|
|
|
36 |
flash_attn=True,
|
37 |
)
|
38 |
|
39 |
+
if not history:
|
40 |
+
history = []
|
41 |
+
|
42 |
+
# Prepare the messages for the model
|
43 |
+
messages = [
|
44 |
+
{
|
45 |
+
"role": "system",
|
46 |
+
"content": "You are AstroSage, an intelligent AI assistant specializing in astronomy, astrophysics, and space science. You provide accurate, scientific information while making complex concepts accessible. You're enthusiastic about space exploration and maintain a sense of wonder about the cosmos."
|
47 |
+
}
|
48 |
]
|
49 |
|
50 |
+
# Add chat history
|
51 |
+
for message in history[:-1]: # Exclude the last message which we just added
|
52 |
+
messages.append({"role": message["role"], "content": message["content"]})
|
|
|
|
|
53 |
|
54 |
+
# Add the current user message
|
55 |
+
messages.append({"role": "user", "content": history[-1]["content"]})
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
56 |
|
57 |
+
# Start generating the response
|
58 |
+
history.append({"role": "assistant", "content": ""})
|
|
|
59 |
|
60 |
+
# Stream the response
|
61 |
+
response = llm.create_chat_completion(
|
62 |
+
messages=messages,
|
63 |
+
max_tokens=512,
|
64 |
+
temperature=0.7,
|
65 |
+
top_p=0.95,
|
66 |
+
stream=True,
|
67 |
+
)
|
|
|
|
|
|
|
|
|
|
|
|
|
68 |
|
69 |
+
for chunk in response:
|
70 |
+
if chunk and "content" in chunk["choices"][0]["delta"]:
|
71 |
+
history[-1]["content"] += chunk["choices"][0]["delta"]["content"]
|
72 |
+
yield history
|
73 |
+
|
74 |
+
def initial_greeting():
    """Return properly formatted initial greeting.

    Produces a one-message history (a random opener from
    GREETING_MESSAGES) in the {"role", "content"} shape the
    type="messages" Chatbot component expects.
    """
    opener = random.choice(GREETING_MESSAGES)
    return [{"role": "assistant", "content": opener}]
|
77 |
+
|
78 |
+
# Custom CSS for a space theme.
# Passed verbatim to gr.Blocks(css=custom_css). NOTE(review): "#component-0"
# looks like a Gradio auto-generated element id — confirm the selector still
# matches across Gradio versions, as these ids are not stable API.
custom_css = """
#component-0 {
    background-color: #1a1a2e;
    border-radius: 15px;
    padding: 20px;
}
.dark {
    background-color: #0f0f1a;
}
.contain {
    max-width: 1200px !important;
}
"""
|
92 |
+
|
93 |
+
# Create the Gradio interface.
# Layout: header markdown, the chat window, a textbox + clear-button row,
# example prompts, then the event wiring (submit chain, clear, initial load).
# NOTE(review): the emoji in the markdown below appear mojibake'd ("π" etc.)
# in this copy — confirm the original file's UTF-8 emoji before shipping.
with gr.Blocks(css=custom_css, theme=gr.themes.Soft(primary_hue="indigo", neutral_hue="slate")) as demo:
    gr.Markdown(
        """
        # π AstroSage: Your Cosmic AI Companion

        Welcome to AstroSage, an advanced AI assistant specializing in astronomy, astrophysics, and cosmology.
        Powered by the AstroSage-Llama-3.1-8B model, I'm here to help you explore the wonders of the universe!

        ### What Can I Help You With?
        - πͺ Explanations of astronomical phenomena
        - π Space exploration and missions
        - β Stars, galaxies, and cosmology
        - π Planetary science and exoplanets
        - π Astrophysics concepts and theories
        - π Astronomical instruments and observations

        Just type your question below and let's embark on a cosmic journey together!
        """
    )

    # type="messages" makes the component consume/emit OpenAI-style
    # {"role", "content"} dicts — the same shape user/bot/initial_greeting use.
    chatbot = gr.Chatbot(
        label="Chat with AstroSage",
        bubble_full_width=False,
        show_label=True,
        height=450,
        type="messages"
    )

    with gr.Row():
        msg = gr.Textbox(
            label="Type your message here",
            placeholder="Ask me anything about space and astronomy...",
            scale=9
        )
        clear = gr.Button("Clear Chat", scale=1)

    # Example questions for quick start; clicking one fills the textbox.
    gr.Examples(
        examples=[
            "What is a black hole and how does it form?",
            "Can you explain the life cycle of a star?",
            "What are exoplanets and how do we detect them?",
            "Tell me about the James Webb Space Telescope.",
            "What is dark matter and why is it important?"
        ],
        inputs=msg,
        label="Example Questions"
    )

    # Set up the message chain with streaming:
    # 1) `user` appends the message and clears the textbox (queue=False so the
    #    echo is immediate), then 2) `bot` streams the reply — it is a
    #    generator that yields the growing history, which Gradio renders
    #    incrementally.
    msg.submit(
        user,
        [msg, chatbot],
        [msg, chatbot],
        queue=False
    ).then(
        bot,
        chatbot,
        chatbot
    )

    # Clear button functionality: returning None resets the chat window.
    clear.click(lambda: None, None, chatbot, queue=False)

    # Initial greeting: populate the chat with a random opener on page load.
    demo.load(initial_greeting, None, chatbot, queue=False)
|
160 |
|
161 |
# Launch the app only when run as a script (not when imported, e.g. by the
# Spaces runtime or tests).
if __name__ == "__main__":
    demo.launch()
|
164 |
|