Update app.py
Chat context and model update
app.py CHANGED

```diff
@@ -35,12 +35,12 @@ safety_settings = [
 
 # Create the Gemini Models for Text and Vision respectively
 txt_model = genai.GenerativeModel(
-    model_name="gemini-1.
+    model_name="gemini-1.5-flash",
     generation_config=generation_config,
     safety_settings=safety_settings,
 )
 vis_model = genai.GenerativeModel(
-    model_name="gemini-
+    model_name="gemini-pro-vision",
     generation_config=generation_config,
     safety_settings=safety_settings,
 )
```
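The hunk above references `generation_config` and `safety_settings`, which are defined earlier in app.py but outside this diff. For anyone trying the change in isolation, here is a minimal sketch of that surrounding setup, with placeholder values that may differ from the actual repo:

```python
import google.generativeai as genai

# Placeholder setup; the real app.py defines its own values.
genai.configure(api_key="YOUR_API_KEY")

generation_config = {
    "temperature": 0.7,
    "top_p": 1,
    "top_k": 1,
    "max_output_tokens": 2048,
}

safety_settings = [
    {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
    {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
]
```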
```diff
@@ -125,13 +125,28 @@ def llm_response(history, text, img):
         list: The updated chat history.
     """
 
+    # Convert chat history to string for context
+    history_str = "\n".join(
+        [
+            f"User: {msg[0]}\nBot: {msg[1]}" if msg[1] else f"User: {msg[0]}"
+            for msg in history
+        ]
+    )
+
     # Generate Response based on the Input
     if not img:
-        response = txt_model.generate_content(f"{system_prompt}User: {text}")
+        # response = txt_model.generate_content(f"{system_prompt}User: {text}")
+        chat_session = txt_model.start_chat(history=[])
+        response = chat_session.send_message(
+            f"{system_prompt}History:\n{history_str}\nUser: {text}"
+        )
     else:
         # Open Image and Generate Response
         img = PIL.Image.open(img)
-        response = vis_model.generate_content([f"{system_prompt}User: {text}", img])
+        chat_session = vis_model.start_chat(history=[])
+        response = chat_session.send_message([f"{system_prompt}\nUser: {text}", img])
+
+        # response = vis_model.generate_content([f"{system_prompt}User: {text}", img])
 
     # Display Response on Chat UI and return the history
     history += [(None, response.text)]
```
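The history-flattening logic added in this hunk can be sanity-checked on its own. This sketch replays the patch's list comprehension on a sample Gradio-style history of `(user, bot)` tuples:

```python
# Sample Gradio chat history: (user_message, bot_reply) tuples, where a
# pending turn has no reply yet.
history = [
    ("Hello", "Hi! How can I help?"),
    ("Describe this image", None),
]

history_str = "\n".join(
    [
        f"User: {msg[0]}\nBot: {msg[1]}" if msg[1] else f"User: {msg[0]}"
        for msg in history
    ]
)

print(history_str)
# User: Hello
# Bot: Hi! How can I help?
# User: Describe this image
```

Note that because each call opens a fresh `start_chat(history=[])`, the SDK's own multi-turn history goes unused; prior context survives only through this flattened string embedded in the prompt.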
```diff
@@ -160,4 +175,4 @@ with gr.Blocks(theme=gr.themes.Soft()) as app:
 
 # Launch the Interface
 app.queue()
-app.launch(debug=False)
+app.launch(debug=False)
```
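Finally, the launch hunk keeps the usual Gradio pattern: `app.queue()` enables request queuing before `app.launch()` starts the server. Here is a minimal self-contained sketch of the same pattern, with an echo handler standing in for the real `llm_response`:

```python
import gradio as gr

with gr.Blocks(theme=gr.themes.Soft()) as app:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()

    def echo(message, history):
        # Stand-in handler; the real app wires llm_response here instead.
        return "", history + [(message, f"You said: {message}")]

    # Clear the textbox and append the new turn on submit.
    msg.submit(echo, [msg, chatbot], [msg, chatbot])

# Queue requests, then launch the server (debug=False, as in the patch).
app.queue()
app.launch(debug=False)
```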