aiqcamp committed (verified)
Commit 77104eb · 1 parent: eb8806e

Update app.py

Files changed (1): app.py (+70 -34)
app.py CHANGED
@@ -9,8 +9,7 @@ GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
 genai.configure(api_key=GEMINI_API_KEY)
 
 # we will be using the Gemini 2.0 Flash model with Thinking capabilities
-model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-1219")
-
+model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-1219")  # Consider Gemini Pro Vision for image input
 
 def format_chat_history(messages: list) -> list:
     """
@@ -26,27 +25,58 @@ def format_chat_history(messages: list) -> list:
         })
     return formatted_history
 
-def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:
+def stream_gemini_response(message_input: str | gr.File, messages: list) -> Iterator[list]:
     """
-    Streams thoughts and response with conversation history support.
+    Streams thoughts and response with conversation history support, handling text or file input.
     """
-    try:
-        print(f"\n=== New Request ===")
+    user_message = ""
+    input_file = None
+
+    if isinstance(message_input, str):
+        user_message = message_input
+        print(f"\n=== New Request (Text) ===")
         print(f"User message: {user_message}")
+    elif isinstance(message_input, gr.utils.SerializableData):  # gr.File itself isn't recognized correctly after Gradio v4.x; use SerializableData instead
+        input_file = message_input.name  # access the temporary file path
+        file_type = message_input.orig_name.split('.')[-1].lower()  # get the original filename's extension
+        print(f"\n=== New Request (File) ===")
+        print(f"File uploaded: {input_file}, type: {file_type}")
+
+        try:
+            with open(input_file, "rb") as f:  # open the file in binary mode for universal handling
+                file_data = f.read()
+
+            if file_type in ['png', 'jpg', 'jpeg', 'gif']:  # example image types - expand as needed
+                user_message = {"inline_data": {"mime_type": f"image/{file_type}", "data": file_data}}  # prepare the image part for Gemini
+            elif file_type == 'csv':
+                user_message = {"inline_data": {"mime_type": "text/csv", "data": file_data}}  # prepare the CSV part
+
+        except Exception as e:
+            print(f"Error reading file: {e}")
+            messages.append(ChatMessage(role="assistant", content=f"Error reading file: {e}"))
+            yield messages
+            return
+    else:
+        messages.append(ChatMessage(role="assistant", content="Sorry, I cannot understand this input format."))
+        yield messages
+        return
+
 
+    try:
         # Format chat history for Gemini
         chat_history = format_chat_history(messages)
 
         # Initialize Gemini chat
         chat = model.start_chat(history=chat_history)
-        response = chat.send_message(user_message, stream=True)
+        response = chat.send_message(user_message, stream=True)  # send the message part as is
 
-        # Initialize buffers and flags
+        # Initialize buffers and flags - same as before
         thought_buffer = ""
         response_buffer = ""
         thinking_complete = False
 
-        # Add initial thinking message
+
+        # Add initial thinking message - same as before
         messages.append(
             ChatMessage(
                 role="assistant",
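
The new file branch above inlines raw bytes as a Blob-style part instead of uploading the file. As a minimal standalone sketch of the same idea with the google-generativeai SDK (the flat `mime_type`/`data` dict is the SDK's documented Blob shape, equivalent to the nested `{"inline_data": {...}}` wrapper built here; `example.png` is a hypothetical local file):

```python
# Standalone sketch, not part of the commit: send a local image to Gemini
# as an inline data part.
import os
import pathlib
import google.generativeai as genai

genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-1219")

image_bytes = pathlib.Path("example.png").read_bytes()  # hypothetical input file
response = model.generate_content([
    "Describe this image.",                           # text part
    {"mime_type": "image/png", "data": image_bytes},  # inline binary part
])
print(response.text)
```

Whether this experimental thinking model accepts non-text parts is not guaranteed; the commit's own comment ("Consider Gemini Pro Vision for image input") flags the same caveat.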
@@ -55,7 +85,7 @@ def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:
             )
         )
 
-        for chunk in response:
+        for chunk in response:  # streaming logic - same as before
             parts = chunk.candidates[0].content.parts
             current_chunk = parts[0].text
 
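
The loop body is unchanged by this commit and therefore elided from the hunk. For readability, here is a sketch of the thought/answer split it performs, reconstructed from the surrounding `thought_buffer`, `response_buffer`, and `thinking_complete` names (an assumed reconstruction, not verbatim from the file):

```python
# Assumed reconstruction of the elided loop body: the thinking model streams the
# hidden thought first; a two-part chunk marks the switch to the visible answer.
for chunk in response:
    parts = chunk.candidates[0].content.parts
    current_chunk = parts[0].text

    if len(parts) == 2 and not thinking_complete:
        thought_buffer += current_chunk   # tail end of the thought
        response_buffer = parts[1].text   # first piece of the answer
        thinking_complete = True
    elif thinking_complete:
        response_buffer += current_chunk  # keep streaming the answer
    else:
        thought_buffer += current_chunk   # still streaming the thought
```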
@@ -108,6 +138,7 @@ def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:
 
         print(f"\n=== Final Response ===\n{response_buffer}")
 
+
     except Exception as e:
         print(f"\n=== Error ===\n{str(e)}")
         messages.append(
@@ -118,14 +149,14 @@ def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:
         )
         yield messages
 
-def user_message(msg: str, history: list) -> tuple[str, list]:
+def user_message(message_text, file_upload, history: list) -> tuple[str, None, list]:
     """Adds user message to chat history"""
-    history.append(ChatMessage(role="user", content=msg))
-    return "", history
-
+    msg = message_text if message_text else file_upload
+    history.append(ChatMessage(role="user", content=msg if isinstance(msg, str) else msg.name))  # store the message or the filename in history
+    return "", None, history  # clear both input fields
 
 # Create the Gradio interface
-with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal", secondary_hue="slate", neutral_hue="neutral")) as demo:  # Using Soft theme with adjusted hues for a refined look
+with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal", secondary_hue="slate", neutral_hue="neutral")) as demo:
     gr.Markdown("# Gemini 2.0 Flash 'Thinking' Chatbot 💭")
 
     chatbot = gr.Chatbot(
@@ -141,60 +172,65 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal", secondary_hue="slate", n
             lines=1,
             label="Chat Message",
             placeholder="Type your message here...",
-            scale=4
+            scale=3
         )
+        file_upload = gr.File(label="Upload File", file_types=["image", "file"], scale=2)  # allow image and any file type
+
 
         clear_button = gr.Button("Clear Chat", scale=1)
 
     # Set up event handlers
     msg_store = gr.State("")  # Store for preserving user message
 
+
     input_box.submit(
-        lambda msg: (msg, msg, ""),  # Store message and clear input
-        inputs=[input_box],
-        outputs=[msg_store, input_box, input_box],
+        user_message,
+        inputs=[input_box, file_upload, chatbot],
+        outputs=[input_box, file_upload, chatbot],
         queue=False
     ).then(
-        user_message,  # Add user message to chat
-        inputs=[msg_store, chatbot],
-        outputs=[input_box, chatbot],
+        stream_gemini_response,
+        inputs=[input_box, chatbot],  # input comes either from the text box or a file; the logic lives inside stream_gemini_response
+        outputs=chatbot
+    )
+
+    file_upload.upload(
+        user_message,
+        inputs=[input_box, file_upload, chatbot],  # the textbox is an input here too, so clearing both fields will work
+        outputs=[input_box, file_upload, chatbot],
         queue=False
     ).then(
-        stream_gemini_response,  # Generate and stream response
-        inputs=[msg_store, chatbot],
+        stream_gemini_response,
+        inputs=[file_upload, chatbot],  # the input is now the uploaded file
         outputs=chatbot
     )
 
+
     clear_button.click(
         lambda: ([], "", ""),
         outputs=[chatbot, input_box, msg_store],
         queue=False
     )
 
-    gr.Markdown(  # Description moved to the bottom
+    gr.Markdown(  # Description moved to the bottom - unchanged
         """
        <br><br><br> <!-- Add some vertical space -->
        ---
        ### About this Chatbot
-
        This chatbot demonstrates the experimental 'thinking' capability of the **Gemini 2.0 Flash** model.
        You can observe the model's thought process as it generates responses, displayed with the "⚙️ Thinking" prefix.
-
        **Key Features:**
-
        * Powered by Google's **Gemini 2.0 Flash** model.
        * Shows the model's **thoughts** before the final answer (experimental feature).
        * Supports **conversation history** for multi-turn chats.
+        * Supports **image and CSV file uploads** for analysis.
        * Uses **streaming** for a more interactive experience.
-
        **Instructions:**
-
-        1. Type your message in the input box below.
-        2. Press Enter or click Submit to send.
+        1. Type your message in the input box or upload a file below.
+        2. Press Enter/Submit or upload to send.
        3. Observe the chatbot's "Thinking" process followed by the final response.
        4. Use the "Clear Chat" button to start a new conversation.
-
-        *Please note*: The 'thinking' feature is experimental and the quality of thoughts may vary.
+        *Please note*: The 'thinking' feature is experimental and the quality of thoughts may vary. File analysis capabilities may be limited depending on the model's experimental features.
        """
    )
 
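Taken together, the new wiring gives both triggers (pressing Enter in the text box, or uploading a file) the same two-step flow: record the user turn, then hand off to the streaming generator. A minimal self-contained sketch of that pattern, assuming Gradio 4.x with `type="messages"` history; unlike the commit, it stashes the pending message in a `gr.State` so the `.then()` step still sees it after the input components are cleared, and both handlers are stand-ins:

```python
# Minimal wiring sketch, not from the commit: two triggers, one streaming handler.
import gradio as gr

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages")
    input_box = gr.Textbox(label="Chat Message")
    file_upload = gr.File(label="Upload File")
    pending = gr.State("")  # preserves the message across chained steps

    def add_turn(text, file, history):
        # Record the user turn from whichever input fired, then clear both inputs.
        if text:
            msg = text
        elif file:  # gr.File delivers a filepath string by default in Gradio 4.x
            msg = file if isinstance(file, str) else file.name
        else:
            msg = ""
        history = (history or []) + [{"role": "user", "content": msg}]
        return msg, "", None, history

    def respond(msg, history):
        # Stand-in for stream_gemini_response: echoes instead of calling Gemini.
        yield history + [{"role": "assistant", "content": f"(echo) {msg}"}]

    for trigger in (input_box.submit, file_upload.upload):
        trigger(
            add_turn,
            inputs=[input_box, file_upload, chatbot],
            outputs=[pending, input_box, file_upload, chatbot],
            queue=False,
        ).then(respond, inputs=[pending, chatbot], outputs=chatbot)

demo.launch()
```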
236