import os

import google.generativeai as genai
import PIL.Image
import gradio as gr
from gradio_multimodalchatbot import MultimodalChatbot
from gradio.data_classes import FileData

# For better security, read sensitive values such as API keys from environment
# variables instead of hard-coding them.
GOOGLE_API_KEY = os.environ.get('GOOGLE_API_KEY')
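
# Optional guard (not part of the original listing): os.environ.get() returns
# None when the variable is unset, so failing fast here gives a clearer error
# than the first failed API call would.
if not GOOGLE_API_KEY:
    raise RuntimeError(
        "GOOGLE_API_KEY is not set. Export it before launching the app."
    )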

genai.configure(api_key=GOOGLE_API_KEY)

# Initialize the Gemini Pro model
model = genai.GenerativeModel('gemini-pro')


def gemini(input, chatbot, file=None):
    """
    Handle one chat turn with the Gemini Pro model.

    Parameters:
        input (str): The user's input text.
        chatbot (list): The list of previous chatbot interactions.
        file (File): An optional file object for image processing (not used
            in this listing; reserved for the Gemini Vision path).

    Returns:
        tuple: The updated chatbot interaction list and an empty string to
        clear the input textbox.
    """
    messages = []

    # Replay previous turns so the model receives the full conversation history
    for user_turn, bot_turn in chatbot:
        messages.extend([
            {'role': 'user', 'parts': [user_turn['text']]},
            {'role': 'model', 'parts': [bot_turn['text']]},
        ])
    messages.append({'role': 'user', 'parts': [input]})

    try:
        response = model.generate_content(messages)
        gemini_resp = response.text

        # Append the new turn in the format MultimodalChatbot expects
        user_msg = {"text": input, "files": []}
        bot_msg = {"text": gemini_resp, "files": []}
        chatbot.append([user_msg, bot_msg])
    except Exception as e:
        # Log the error and surface it to the user as a Gradio error modal
        print(f"An error occurred: {e}")
        raise gr.Error(str(e))

    return chatbot, ""
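

# --- Illustrative sketch (not part of the original app) ----------------------
# The PIL.Image and FileData imports and the unused `file` parameter point at
# an image path through the Gemini Vision model. The sketch below shows one
# way that branch might look; the 'gemini-pro-vision' model name, the
# gr.UploadButton wiring, and the exact FileData fields are assumptions that
# depend on the installed google-generativeai and Gradio versions.
vision_model = genai.GenerativeModel('gemini-pro-vision')


def gemini_vision(input, file, chatbot):
    """Send an uploaded image (plus optional text) to Gemini Pro Vision."""
    # Depending on the Gradio version, `file` is either a filepath string or a
    # tempfile-like object with a `.name` attribute.
    path = file if isinstance(file, str) else file.name
    img = PIL.Image.open(path)
    response = vision_model.generate_content([input, img] if input else [img])

    # Attach the image to the user turn so MultimodalChatbot can render it
    user_msg = {"text": input, "files": [{"file": FileData(path=path)}]}
    bot_msg = {"text": response.text, "files": []}
    chatbot.append([user_msg, bot_msg])
    return chatbot, "", None

# Hypothetical wiring inside the gr.Blocks() context below:
#     up = gr.UploadButton("Upload Image", file_types=["image"], scale=1)
#     up.upload(gemini_vision, [tb, up, multi], [multi, tb, up])
# ------------------------------------------------------------------------------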


# Define the Gradio Blocks interface
with gr.Blocks() as demo:
    # Add a centered header using HTML
    gr.HTML("<center><h1>Gemini Chat PRO API</h1></center>")

    # Initialize the MultimodalChatbot component
    multi = MultimodalChatbot(value=[], height=800)

    with gr.Row():
        # Textbox for user input with increased scale for better visibility
        tb = gr.Textbox(scale=4, placeholder='Input text and press Enter')

    # Define the behavior on text submission
    tb.submit(gemini, [tb, multi], [multi, tb])

# Launch the demo with a queue to handle multiple users
demo.queue().launch()
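
# Example local run (hypothetical package set and filename; adjust to your
# environment):
#   pip install google-generativeai gradio gradio_multimodalchatbot Pillow
#   export GOOGLE_API_KEY="your-key"
#   python app.py
# Gradio prints the local URL to open in a browser.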