# Streamlit chatbot app — chat with Google's Gemini Flash model.
# (Originally hosted as a Hugging Face Space.)
import os
import streamlit as st
import google.generativeai as genai

# Set up the Streamlit app page.
st.set_page_config(page_title="Chatbot with Gemini Flash", layout="wide")
st.title("Chatbot with Gemini Flash ⚡️")
st.caption("Chat with Google's Gemini Flash model using text input 💬")

# Get the Google API key from the user (rendered as a password field).
api_key = st.text_input("Enter Google API Key", type="password")

if api_key:
    # Configure the Gemini client only after a key has been supplied;
    # the original configured with an empty key on every run, which
    # fails as soon as the first request is made.
    genai.configure(api_key=api_key)
    model = genai.GenerativeModel(model_name="gemini-1.5-flash-latest")

    # Initialize the chat history on the first run of this session.
    if "messages" not in st.session_state:
        st.session_state.messages = []

    # Main layout: container holding the conversation transcript.
    chat_placeholder = st.container()

    with chat_placeholder:
        # Replay the stored chat history on every rerun.
        for message in st.session_state.messages:
            with st.chat_message(message["role"]):
                st.markdown(message["content"])

    # User input area pinned to the bottom of the page.
    prompt = st.chat_input("What do you want to know?")

    if prompt:
        # Record and display the user's message.
        st.session_state.messages.append({"role": "user", "content": prompt})
        with chat_placeholder:
            with st.chat_message("user"):
                st.markdown(prompt)

        with st.spinner("Generating response..."):
            # Single-turn generation from the current prompt only
            # (no multi-turn context is sent to the model).
            response = model.generate_content([prompt])

        # Display the reply AND persist it in session state.
        # BUG FIX: the original never appended the assistant message,
        # so replies disappeared from the transcript on the next rerun.
        with chat_placeholder:
            with st.chat_message("assistant"):
                st.markdown(response.text)
        st.session_state.messages.append(
            {"role": "assistant", "content": response.text}
        )
    else:
        # chat_input returns None until the user submits something.
        st.warning("Please enter a text query.")