Update app.py
chat context and model update
app.py
CHANGED
@@ -8,7 +8,7 @@ import google.generativeai as genai
 
 from dotenv import load_dotenv
 
-# Load the Environment Variables
+# Load the Environment Variables from .env file
 load_dotenv()
 
 # Set the Gemini API Key
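The load_dotenv() call above pulls configuration out of a local .env file instead of hard-coding it. As a minimal sketch of the surrounding setup, assuming the key variable is named GOOGLE_API_KEY (neither the actual variable name nor the genai.configure line is visible in this diff):

import os

import google.generativeai as genai
from dotenv import load_dotenv

# Load the Environment Variables from .env file (e.g. GOOGLE_API_KEY=...)
load_dotenv()

# Configure the client with the key; the variable name here is an assumption
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))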
@@ -35,14 +35,14 @@ safety_settings = [
 
 # Create the Gemini Models for Text and Vision respectively
 txt_model = genai.GenerativeModel(
-    model_name="gemini-1.
+    model_name="gemini-1.5-pro",
     generation_config=generation_config,
     safety_settings=safety_settings,
 )
 vis_model = genai.GenerativeModel(
-    model_name="gemini-
+    model_name="gemini-pro-vision",
     generation_config=generation_config,
-
+    safety_settings=safety_settings,
 )
 
 # System Prompt
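Only the tail of safety_settings = [ and the two constructor calls are visible in this hunk, so the app's concrete configuration is not shown. As a hedged sketch, google.generativeai accepts plain dicts for both parameters; every value below is a placeholder, not the real settings from app.py:

import google.generativeai as genai

# Placeholder generation parameters; the real values in app.py are not shown
generation_config = {
    "temperature": 0.7,
    "top_p": 1.0,
    "top_k": 32,
    "max_output_tokens": 1024,
}

# safety_settings is a list of category/threshold pairs
safety_settings = [
    {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
    {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
]

txt_model = genai.GenerativeModel(
    model_name="gemini-1.5-pro",
    generation_config=generation_config,
    safety_settings=safety_settings,
)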
@@ -146,13 +146,28 @@ def llm_response(history, text, img):
         list: The updated chat history.
     """
 
+    # Convert chat history to string for context
+    history_str = "\n".join(
+        [
+            f"User: {msg[0]}\nBot: {msg[1]}" if msg[1] else f"User: {msg[0]}"
+            for msg in history
+        ]
+    )
+
     # Generate Response based on the Input
     if not img:
-        response = txt_model.generate_content(f"{system_prompt}User: {text}")
+        # response = txt_model.generate_content(f"{system_prompt}User: {text}")
+        chat_session = txt_model.start_chat(history=[])
+        response = chat_session.send_message(
+            f"{system_prompt}History:\n{history_str}\nUser: {text}"
+        )
     else:
         # Open Image and Generate Response
         img = PIL.Image.open(img)
-        response = vis_model.generate_content([f"{system_prompt}User: {text}", img])
+        chat_session = vis_model.start_chat(history=[])
+        response = chat_session.send_message([f"{system_prompt}\nUser: {text}", img])
+
+        # response = vis_model.generate_content([f"{system_prompt}User: {text}", img])
 
     # Display Response on Chat UI and return the history
     history += [(None, response.text)]
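The new history-to-string step is plain Python and easy to verify in isolation. A small worked example with a Gradio-style history of (user, bot) tuples, including a turn whose bot reply is still None:

# Sample history in the (user_message, bot_message) tuple format the app uses
history = [
    ("Hi", "Hello! How can I help?"),
    ("What is Gemini?", None),  # no bot reply yet for this turn
]

history_str = "\n".join(
    [
        f"User: {msg[0]}\nBot: {msg[1]}" if msg[1] else f"User: {msg[0]}"
        for msg in history
    ]
)

print(history_str)
# User: Hi
# Bot: Hello! How can I help?
# User: What is Gemini?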
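A note on the design choice: the updated code starts each turn with start_chat(history=[]) and injects prior turns as flat text inside the prompt. The library can also carry context natively through start_chat's history parameter; as a hedged sketch of that alternative inside llm_response's text branch, where the mapping from the app's tuples to role/parts dicts is an assumption, not code from this commit:

# Alternative sketch: pass prior turns as structured history, not a flat string
structured_history = []
for user_msg, bot_msg in history:
    if user_msg:
        structured_history.append({"role": "user", "parts": [user_msg]})
    if bot_msg:
        structured_history.append({"role": "model", "parts": [bot_msg]})

chat_session = txt_model.start_chat(history=structured_history)
response = chat_session.send_message(f"{system_prompt}User: {text}")

Either way the model sees the conversation so far; the flat-string approach taken in this commit keeps the system prompt and history in one place, at the cost of re-sending everything as a single message each turn.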