Update app.py
Browse files
app.py
CHANGED
Old version (lines prefixed with "-" were removed):

@@ -1,101 +1,77 @@
import gradio as gr
from typing import List, Tuple, Optional, Union
import os
-from transformers import AutoTokenizer, AutoModelForCausalLM
-import torch

class ChristmasBot:
-    def __init__(self, model_name=...):
-        """
-        Initialize the Christmas chatbot with a Hugging Face model.
-        Default model is Mistral-7B, but you can change it to any other model.
-        """
        self.stable_diffusion_available = False

-        # Initialize the model and tokenizer
-        print("Loading model and tokenizer...")
-        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
-        self.model = AutoModelForCausalLM.from_pretrained(
-            model_name,
-            torch_dtype=torch.float16,
-            device_map="auto",
-            load_in_8bit=True  # Enable 8-bit quantization for memory efficiency
-        )
-        print("Model loaded successfully!")
-
-        # System prompt to give the model Christmas context
        self.system_prompt = """You are Holly, a cheerful Christmas helper chatbot.
You love everything about Christmas and respond in a warm, festive manner.
Keep your responses concise but friendly.
If users ask about sensitive topics, guide the conversation back to Christmas-related subjects."""
-
    def _generate_image(self, prompt: str) -> Optional[str]:
-        """
-
            return None
-        return None
-
-    def _format_chat_history(self, history: List[List[str]]) -> str:
-        """Format the chat history into a single string for the model."""
-        formatted_history = self.system_prompt + "\n\n"
-        for user_msg, bot_msg in history:
-            if user_msg:
-                formatted_history += f"User: {user_msg}\n"
-            if bot_msg:
-                formatted_history += f"Assistant: {bot_msg}\n"
-        return formatted_history

    def _get_llm_response(self, message: str, history: List[List[str]]) -> str:
-        """
        try:
-
-            chat_history = self._format_chat_history(history)
-            prompt = f"{chat_history}User: {message}\nAssistant:"

-
-
-

-
-
-
-
                temperature=0.7,
-
-                repetition_penalty=1.2,
-                do_sample=True,
-                num_return_sequences=1,
-                pad_token_id=self.tokenizer.eos_token_id
            )

-
-            response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
-            response = response.split("Assistant:")[-1].strip()
-
-            return response

        except Exception as e:
-            print(f"
-            return "Ho ho ho! I seem to be having trouble with my Christmas magic. Could you try
    def process_message(self, message: str, history: List[List[str]]) -> Union[str, Tuple[str, str]]:
        """Process user message and return appropriate response."""
-        # Initial greeting
        if not history:
            return "Ho ho ho! Merry Christmas! I'm Holly, your Christmas helper. Would you like to create a Christmas card or chat about the holidays?"

        message = message.lower()
        last_response = history[-1][1].lower() if history else ""

-        # Handle card creation request
        if "card" in message:
            if self.stable_diffusion_available:
-                return "Wonderful! Let's create a Christmas card. Please describe the scene you'd like on your card, and I'll generate it
            return "I'm sorry, but the card generation feature is currently unavailable. Let's chat about Christmas instead!"

-        # Handle card generation
        if "card" in last_response and self.stable_diffusion_available:
-            image = self._generate_image(
            if image:
                return (
                    f"I've created a Christmas card based on your description: '{message}'. "
@@ -104,13 +80,61 @@ class ChristmasBot:
                )
        return "I'm sorry, I couldn't generate the image. Would you like to try again or chat about something else?"

-        # Default to LLM response for all other messages
        return self._get_llm_response(message, history)

-
-
-
-

    def user(user_message: str, history: List[List[str]]) -> Tuple[str, List[List[str]]]:
        return "", history + [[user_message, None]]
@@ -123,30 +147,10 @@ def create_gradio_interface() -> gr.Blocks:
        history[-1][1] = bot_message
        return history, None

-
-
-
-
-        - Chat about anything Christmas-related
-        - Type 'card' to create a custom Christmas card
-        """)
-
-        chatbot = gr.Chatbot()
-        msg = gr.Textbox(
-            label="Type your message here",
-            placeholder="Ask me anything about Christmas or request a card!",
-            show_label=True
-        )
-        clear = gr.Button("Clear Chat")
-        image_output = gr.Image(label="Generated Card")
-
-        msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
-            bot_response, chatbot, [chatbot, image_output]
-        )
-        clear.click(lambda: None, None, chatbot, queue=False)
-
-    return demo

if __name__ == "__main__":
-    demo = create_gradio_interface()
    demo.launch()

New version (lines prefixed with "+" were added):

import gradio as gr
from typing import List, Tuple, Optional, Union
import os
+from openai import OpenAI
+import json

class ChristmasBot:
+    def __init__(self):
+        """Initialize the Christmas chatbot with OpenAI."""
        self.stable_diffusion_available = False
+        # For Hugging Face Spaces, get the API key from secrets
+        self.client = OpenAI(api_key=os.environ.get('OPENAI_API_KEY'))

        self.system_prompt = """You are Holly, a cheerful Christmas helper chatbot.
You love everything about Christmas and respond in a warm, festive manner.
Keep your responses concise but friendly.
If users ask about sensitive topics, guide the conversation back to Christmas-related subjects."""
+
    def _generate_image(self, prompt: str) -> Optional[str]:
+        """Generate an image using DALL-E."""
+        try:
+            response = self.client.images.generate(
+                model="dall-e-3",
+                prompt=f"Christmas themed illustration: {prompt}, festive, cheerful, holiday spirit",
+                size="1024x1024",
+                quality="standard",
+                n=1,
+            )
+            return response.data[0].url
+        except Exception as e:
+            print(f"Image generation error: {e}")
            return None

    def _get_llm_response(self, message: str, history: List[List[str]]) -> str:
+        """Get response from OpenAI."""
        try:
+            messages = [{"role": "system", "content": self.system_prompt}]

+            for h in history:
+                if h[0]:
+                    messages.append({"role": "user", "content": h[0]})
+                if h[1]:
+                    messages.append({"role": "assistant", "content": h[1]})

+            messages.append({"role": "user", "content": message})
+
+            response = self.client.chat.completions.create(
+                model="gpt-3.5-turbo",
+                messages=messages,
                temperature=0.7,
+                max_tokens=150
            )

+            return response.choices[0].message.content

        except Exception as e:
+            print(f"OpenAI API error: {e}")
+            return "Ho ho ho! I seem to be having trouble with my Christmas magic. Could you try again?"

    def process_message(self, message: str, history: List[List[str]]) -> Union[str, Tuple[str, str]]:
        """Process user message and return appropriate response."""
        if not history:
            return "Ho ho ho! Merry Christmas! I'm Holly, your Christmas helper. Would you like to create a Christmas card or chat about the holidays?"

        message = message.lower()
        last_response = history[-1][1].lower() if history else ""

        if "card" in message:
            if self.stable_diffusion_available:
+                return "Wonderful! Let's create a Christmas card. Please describe the scene you'd like on your card, and I'll generate it using DALL-E."
            return "I'm sorry, but the card generation feature is currently unavailable. Let's chat about Christmas instead!"

        if "card" in last_response and self.stable_diffusion_available:
+            image = self._generate_image(message)
            if image:
                return (
                    f"I've created a Christmas card based on your description: '{message}'. "

                )
        return "I'm sorry, I couldn't generate the image. Would you like to try again or chat about something else?"

        return self._get_llm_response(message, history)

+with gr.Blocks(css="""
+    :root {
+        --holly-green: #146B3A;
+        --christmas-red: #EA4630;
+        --snow-white: #F8F9FA;
+        --gold: #FFC107;
+    }
+    .message.user {
+        background-color: var(--holly-green) !important;
+        color: white !important;
+        border-radius: 15px 15px 5px 15px !important;
+    }
+    .message.bot {
+        background-color: var(--christmas-red) !important;
+        color: white !important;
+        border-radius: 15px 15px 15px 5px !important;
+    }
+    textarea {
+        border: 2px solid var(--holly-green) !important;
+        border-radius: 8px !important;
+    }
+    button {
+        background-color: var(--holly-green) !important;
+        color: white !important;
+        border: none !important;
+        border-radius: 8px !important;
+        transition: background-color 0.2s !important;
+    }
+    button:hover {
+        background-color: var(--christmas-red) !important;
+    }
+""") as demo:
+    gr.Markdown("# 🎄 Christmas Chatbot & Card Generator 🎅")
+    gr.Markdown("""
+    Welcome to the Christmas Chatbot!
+    - Chat about anything Christmas-related
+    - Type 'card' to create a custom Christmas card with DALL-E
+    """)
+
+    bot = ChristmasBot()
+
+    chatbot = gr.Chatbot(
+        bubble_full_width=False,
+        avatar_images=("🧑", "🎅"),
+        height=400
+    )
+    msg = gr.Textbox(
+        label="Type your message here",
+        placeholder="Ask me anything about Christmas or request a card!",
+        show_label=True
+    )
+    clear = gr.Button("Clear Chat")
+    image_output = gr.Image(label="Generated Card", visible=False)

    def user(user_message: str, history: List[List[str]]) -> Tuple[str, List[List[str]]]:
        return "", history + [[user_message, None]]

        history[-1][1] = bot_message
        return history, None

+    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
+        bot_response, chatbot, [chatbot, image_output]
+    )
+    clear.click(lambda: None, None, chatbot, queue=False)

if __name__ == "__main__":
    demo.launch()
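
For a quick local sanity check of the updated bot outside the Space, you can drive ChristmasBot directly. The snippet below is a rough sketch rather than part of the commit: it assumes the updated file is importable as app, that OPENAI_API_KEY is exported in your shell (the Space reads it from its secrets), and it only exercises process_message.

import os
from app import ChristmasBot  # assumed import path for the updated app.py

# ChristmasBot reads OPENAI_API_KEY from the environment, so fail early if it is missing.
assert os.environ.get("OPENAI_API_KEY"), "export OPENAI_API_KEY before running"

bot = ChristmasBot()

# An empty history returns the canned greeting without calling the API.
print(bot.process_message("Hello!", []))

# With stable_diffusion_available left at False, a 'card' request takes the
# "currently unavailable" branch, so the DALL-E path is never reached here.
print(bot.process_message("Can you make me a card?", [["Hello!", "Ho ho ho!"]]))

# Anything else goes through _get_llm_response, i.e. a gpt-3.5-turbo call.
print(bot.process_message("What should I bake for Christmas Eve?", [["Hello!", "Ho ho ho!"]]))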