# Gemini Chat PRO API — Gradio demo backed by google.generativeai (gemini-pro).
import google.generativeai as genai
import os
import PIL.Image
import gradio as gr
from gradio_multimodalchatbot import MultimodalChatbot
from gradio.data_classes import FileData
# Security: the API key is read from the environment rather than hard-coded,
# so it never appears in source control. If GOOGLE_API_KEY is unset this
# returns None and genai.configure() is handed a null key — requests will
# fail later at call time, not here.
GOOGLE_API_KEY = os.environ.get('GOOGLE_API_KEY')
genai.configure(api_key=GOOGLE_API_KEY)
# Text-only Gemini Pro model used for every chat turn below.
model = genai.GenerativeModel('gemini-pro')
def gemini(input, file=None, chatbot=None):
    """
    Send one chat turn to the Gemini Pro model and append the exchange.

    Parameters:
        input (str): The user's message for this turn.
        file: Optional extra payload. NOTE(review): the Gradio wiring passes
            the MultimodalChatbot value here (not into ``chatbot``), so when
            it looks like a history list it is used as the prior conversation
            — TODO confirm against the ``tb.submit`` call.
        chatbot (list | None): Prior turns as ``[user_msg, bot_msg]`` pairs of
            ``{"text": ..., "files": [...]}`` dicts. Defaults to a fresh list.

    Returns:
        tuple: (updated chatbot list, "" to clear the textbox, None).

    Raises:
        gr.Error: If the model call fails, so the UI surfaces the failure.
    """
    # BUG FIX: the original default was a mutable ``chatbot=[]`` that the
    # function appended to — one list shared across every call and every
    # user. Use a None sentinel and fall back to ``file`` when it carries
    # the component's history (see NOTE above).
    if chatbot is None:
        chatbot = file if isinstance(file, list) else []

    # Rebuild the model-side transcript from the stored UI history, then
    # add the current user turn (same append in both the empty and
    # non-empty cases — the original duplicated it across an if/else).
    messages = []
    for user_msg, bot_msg in chatbot:
        messages.append({'role': 'user', 'parts': [user_msg['text']]})
        messages.append({'role': 'model', 'parts': [bot_msg['text']]})
    messages.append({'role': 'user', 'parts': [input]})

    try:
        response = model.generate_content(messages)
        gemini_resp = response.text
    except Exception as e:
        # Surface the failure in the UI; gr.Error wants a message string,
        # and ``from e`` preserves the original traceback for the logs.
        print(f"An error occurred: {e}")
        raise gr.Error(str(e)) from e

    # Record the exchange in the format MultimodalChatbot expects.
    user_msg = {"text": input, "files": []}
    bot_msg = {"text": gemini_resp, "files": []}
    chatbot.append([user_msg, bot_msg])
    return chatbot, "", None
# Define the Gradio Blocks interface.
with gr.Blocks() as demo:
    # Centered page header.
    gr.HTML("<center><h1>Gemini Chat PRO API</h1></center>")
    # Conversation display; value is a list of [user_msg, bot_msg] pairs.
    multi = MultimodalChatbot(value=[], height=800)
    with gr.Row():
        # User input; larger scale so the textbox dominates the row.
        tb = gr.Textbox(scale=4, placeholder='Input text and press Enter')
    # On Enter: call gemini(input=tb, file=multi) and write back
    # (chatbot -> multi, "" -> tb to clear the box).
    # NOTE(review): ``multi`` is wired into gemini's ``file`` parameter,
    # not ``chatbot`` — verify that gemini treats it as the history list.
    tb.submit(gemini, [tb, multi], [multi, tb])
# Queue so concurrent users are served in order.
# (Removed a stray trailing "|" artifact that made this line a syntax error.)
demo.queue().launch()