eaglelandsonce committed on
Commit 8105d14 • 1 Parent(s): 52d7f2e

Update app.py

Files changed (1): app.py +131 -99
app.py CHANGED
@@ -7,35 +7,126 @@ from threading import Thread
 # Set an environment variable
 HF_TOKEN = os.environ.get("HF_TOKEN", None)
 
-DESCRIPTION = '''<div><h1 style="text-align: center;">Meta Llama3 8B with EmotiPal</h1></div>'''
-
-LICENSE = """<p/>---Built with Meta Llama 3"""
-
-PLACEHOLDER = """"""
-
-css = """
-h1 { text-align: center; display: block;}
-#duplicate-button { margin: auto; color: white; background: #1565c0; border-radius: 100vh;}
-"""
-
-# EmotiPal HTML
+# EmotiPal HTML and JavaScript
 EMOTIPAL_HTML = """
-<div style="font-family: Arial, sans-serif; text-align: center; background-color: #f0f0f0; padding: 20px; border-radius: 10px;">
-    <h1>EmotiPal</h1>
-    <div id="pet-image" style="width: 200px; height: 200px; margin: 20px auto; border-radius: 50%; background-color: #ddd; display: flex; justify-content: center; align-items: center; font-size: 100px;">
-        🐢
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta charset="UTF-8">
+    <meta name="viewport" content="width=device-width, initial-scale=1.0">
+    <title>EmotiPal</title>
+    <style>
+        body {
+            font-family: Arial, sans-serif;
+            display: flex;
+            justify-content: center;
+            align-items: center;
+            height: 100vh;
+            margin: 0;
+            background-color: #f0f0f0;
+        }
+        .container {
+            background-color: white;
+            padding: 20px;
+            border-radius: 10px;
+            box-shadow: 0 0 10px rgba(0,0,0,0.1);
+            text-align: center;
+        }
+        #pet-image {
+            width: 200px;
+            height: 200px;
+            margin: 20px auto;
+            border-radius: 50%;
+            background-color: #ddd;
+            display: flex;
+            justify-content: center;
+            align-items: center;
+            font-size: 100px;
+        }
+        button {
+            margin: 10px;
+            padding: 10px 20px;
+            font-size: 16px;
+            cursor: pointer;
+        }
+    </style>
+</head>
+<body>
+    <div class="container">
+        <h1>EmotiPal</h1>
+        <div id="pet-image">🐢</div>
+        <h2 id="greeting">How are you feeling today?</h2>
+        <div>
+            <button onclick="setMood('happy')">😊 Happy</button>
+            <button onclick="setMood('sad')">😒 Sad</button>
+            <button onclick="setMood('angry')">😠 Angry</button>
+            <button onclick="setMood('anxious')">😰 Anxious</button>
+        </div>
+        <p id="support-message"></p>
+        <input type="text" id="query-input" placeholder="Ask EmotiPal something (optional)">
+        <button onclick="askLlama()">Ask Llama 3</button>
+        <p id="llama-response"></p>
     </div>
-    <h2 id="greeting">How are you feeling today?</h2>
-</div>
-"""
 
-# Define mood messages
-MOOD_MESSAGES = {
-    "happy": "That's wonderful! Your happiness is contagious. Why not share your joy with someone today?",
-    "sad": "I'm here for you. Remember, it's okay to feel sad sometimes. How about we do a quick gratitude exercise?",
-    "angry": "I understand you're feeling frustrated. Let's try a deep breathing exercise to help calm down.",
-    "anxious": "You're not alone in feeling anxious. How about we focus on something positive you're looking forward to?"
-}
+    <script>
+        const supportMessages = {
+            happy: "That's wonderful! Your happiness is contagious. Why not share your joy with someone today?",
+            sad: "I'm here for you. Remember, it's okay to feel sad sometimes. How about we do a quick gratitude exercise?",
+            angry: "I understand you're feeling frustrated. Let's try a deep breathing exercise to help calm down.",
+            anxious: "You're not alone in feeling anxious. How about we focus on something positive you're looking forward to?"
+        };
+
+        let currentMood = '';
+
+        function setMood(mood) {
+            currentMood = mood;
+            const greeting = document.getElementById('greeting');
+            const supportMessage = document.getElementById('support-message');
+            const petImage = document.getElementById('pet-image');
+
+            greeting.textContent = `I see you're feeling ${mood} today.`;
+            supportMessage.textContent = supportMessages[mood];
+
+            switch(mood) {
+                case 'happy':
+                    petImage.textContent = '🐢';
+                    break;
+                case 'sad':
+                    petImage.textContent = '🐾';
+                    break;
+                case 'angry':
+                    petImage.textContent = '🐺';
+                    break;
+                case 'anxious':
+                    petImage.textContent = '🐩';
+                    break;
+            }
+        }
+
+        function askLlama() {
+            const queryInput = document.getElementById('query-input').value;
+            const llamaQuery = currentMood ?
+                `I'm feeling ${currentMood}. ${queryInput}` :
+                queryInput || "Can you give me some advice or encouragement?";
+
+            // This function needs to be implemented in the Python backend
+            getLlamaResponse(llamaQuery).then(response => {
+                document.getElementById('llama-response').textContent = response;
+            });
+        }
+
+        // This function will be replaced by the actual implementation in Gradio
+        function getLlamaResponse(query) {
+            return new Promise(resolve => {
+                setTimeout(() => {
+                    resolve("This is a placeholder response from Llama 3. Implement the actual API call in the Gradio backend.");
+                }, 1000);
+            });
+        }
+    </script>
+</body>
+</html>
+"""
 
 # Load the tokenizer and model
 tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
@@ -43,7 +134,7 @@ model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruc
 
 terminators = [
     tokenizer.eos_token_id,
-    tokenizer.convert_tokens_to_ids("")
+    tokenizer.convert_tokens_to_ids("<|eot_id|>")
 ]
 
 @spaces.GPU(duration=120)
@@ -72,80 +163,21 @@ def chat_llama3_8b(message: str, history: list, temperature: float, max_new_toke
     outputs.append(text)
     return "".join(outputs)
 
-# EmotiPal function
-def set_mood(mood, query_input):
-    pet_emoji = {"happy": "🐢", "sad": "🐾", "angry": "🐺", "anxious": "🐩"}
-    greeting = f"I see you're feeling {mood} today."
-    pet_image = pet_emoji.get(mood, "🐢")
-    support_message = MOOD_MESSAGES.get(mood, "")
-
-    # Generate a query for Llama 3 based on the mood
-    if query_input:
-        llama_query = f"I'm feeling {mood}. {query_input}"
-    else:
-        llama_query = f"I'm feeling {mood}. Can you give me some advice or encouragement?"
-
-    return greeting, pet_image, support_message, llama_query
-
-# Function to handle Llama 3 response
-def get_llama_response(query, history):
-    response = chat_llama3_8b(query, history, temperature=0.7, max_new_tokens=256)
+def get_llama_response(query):
+    response = chat_llama3_8b(query, [], temperature=0.7, max_new_tokens=256)
     return response
 
-# Gradio block
-with gr.Blocks(fill_height=True, css=css) as demo:
-    gr.Markdown(DESCRIPTION)
-
-    with gr.Row():
-        with gr.Column(scale=1):
-            gr.HTML(EMOTIPAL_HTML)
-            mood_buttons = gr.Radio(
-                ["happy", "sad", "angry", "anxious"],
-                label="Select your mood",
-                info="Click on your current mood",
-            )
-            query_input = gr.Textbox(label="Ask EmotiPal something (optional)")
-            greeting_output = gr.Textbox(label="EmotiPal says:")
-            pet_image_output = gr.Textbox(label="Pet")
-            support_message_output = gr.Textbox(label="Support Message")
-            llama_query_output = gr.Textbox(label="Query for Llama 3")
-            llama_response_output = gr.Textbox(label="Llama 3 Response")
-
-            mood_buttons.change(
-                set_mood,
-                inputs=[mood_buttons, query_input],
-                outputs=[greeting_output, pet_image_output, support_message_output, llama_query_output]
-            )
-
-            ask_llama_button = gr.Button("Ask Llama 3")
-            ask_llama_button.click(
-                get_llama_response,
-                inputs=[llama_query_output, gr.State([])],
-                outputs=[llama_response_output]
-            )
-
-        with gr.Column(scale=2):
-            chatbot = gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='Gradio ChatInterface')
-            gr.ChatInterface(
-                fn=chat_llama3_8b,
-                chatbot=chatbot,
-                fill_height=True,
-                additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
-                additional_inputs=[
-                    gr.Slider(minimum=0, maximum=1, step=0.1, value=0.95, label="Temperature", render=False),
-                    gr.Slider(minimum=128, maximum=4096, step=1, value=512, label="Max new tokens", render=False),
-                ],
-                examples=[
-                    ['How to setup a human base on Mars? Give short answer.'],
-                    ["Explain theory of relativity to me like I'm 8 years old."],
-                    ['What is 9,000 * 9,000?'],
-                    ['Write a pun-filled happy birthday message to my friend Alex.'],
-                    ['Justify why a penguin might make a good king of the jungle.']
-                ],
-                cache_examples=False,
-            )
-
-    gr.Markdown(LICENSE)
+# Gradio interface
+with gr.Blocks() as demo:
+    gr.HTML(EMOTIPAL_HTML)
+    query_input = gr.Textbox(visible=False)
+    response_output = gr.Textbox(visible=False)
+    gr.Interface(
+        fn=get_llama_response,
+        inputs=query_input,
+        outputs=response_output,
+        live=True
+    )
 
 if __name__ == "__main__":
     demo.launch()