import os
from typing import Generator

from dotenv import find_dotenv, load_dotenv
import streamlit as st
from groq import Groq

_ = load_dotenv(find_dotenv())
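# The .env file is expected to provide the two secrets read below:
# PASSWORD (login gate) and GROQ_API_KEY (Groq client).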
st.set_page_config(page_icon="💬", layout="wide", page_title="...")
def icon(emoji: str):
    """Shows an emoji as a Notion-style page icon."""
    st.write(
        f'<span style="font-size: 78px; line-height: 1">{emoji}</span>',
        unsafe_allow_html=True,
    )
icon("⚡")

st.subheader("Chatbot", divider="rainbow", anchor=False)
# LOGIN
password = os.environ["PASSWORD"]

# Create a login form container
login_container = st.container()
def login(password_input):
    """Mark the session as authenticated if the password matches."""
    if password_input == password:
        st.session_state.authenticated = True
        login_container.empty()
    else:
        st.error("Invalid password")

with login_container:
    password_input = st.text_input("Password", type="password")
    # Pass the callback and its arguments separately: writing
    # on_click=login(password_input) would call login on every rerun
    # and hand st.button its return value (None) instead of a callback.
    st.button("Authenticate", on_click=login, args=(password_input,))
# Only allow access to the app if the user is authenticated
if "authenticated" not in st.session_state or not st.session_state.authenticated:
    st.stop()  # Halt the script here on every rerun until authentication succeeds
else:
    login_container.empty()
client = Groq(
    api_key=os.environ["GROQ_API_KEY"],
)
# Initialize chat history and selected model
if "messages" not in st.session_state:
    st.session_state.messages = []

if "selected_model" not in st.session_state:
    st.session_state.selected_model = None
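# Streamlit reruns this script from the top on every interaction, so the
# conversation and the last-selected model must persist in st.session_state
# rather than in ordinary local variables.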
# Define model details (Groq model ID -> display name, context window, developer)
models = {
    "mixtral-8x7b-32768": {
        "name": "Mixtral-8x7b-Instruct-v0.1",
        "tokens": 32768,
        "developer": "Mistral",
    },
    "gemma-7b-it": {"name": "Gemma-7b-it", "tokens": 8192, "developer": "Google"},
    "llama2-70b-4096": {"name": "LLaMA2-70b-chat", "tokens": 4096, "developer": "Meta"},
    "llama3-70b-8192": {"name": "LLaMA3-70b-8192", "tokens": 8192, "developer": "Meta"},
    "llama3-8b-8192": {"name": "LLaMA3-8b-8192", "tokens": 8192, "developer": "Meta"},
}
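# Each "tokens" value doubles as the upper bound of the Max Tokens slider below.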
# Layout for model selection and max_tokens slider
col1, col2 = st.columns(2)

with col1:
    model_option = st.selectbox(
        "Choose a model:",
        options=list(models.keys()),
        format_func=lambda x: models[x]["name"],  # show the display name, not the raw ID
        index=0,  # Default to the first model in the list
    )
# Detect model change and clear chat history if the model has changed
if st.session_state.selected_model != model_option:
    st.session_state.messages = []
    st.session_state.selected_model = model_option

max_tokens_range = models[model_option]["tokens"]
with col2:
    # Adjust the max_tokens slider dynamically based on the selected model
    max_tokens = st.slider(
        "Max Tokens:",
        min_value=512,  # Minimum value to allow some flexibility
        max_value=max_tokens_range,
        # Default to the model's maximum (capped at 32768)
        value=min(32768, max_tokens_range),
        step=512,
        help=f"Adjust the maximum number of tokens in the model's response. Max for selected model: {max_tokens_range}",
    )
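# Note: a model's context window is shared between the prompt and the
# completion, so a max_tokens value near the full window can be rejected
# by the API once the conversation grows long.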
# Display chat messages from history on app rerun
for message in st.session_state.messages:
    avatar = "🧠" if message["role"] == "assistant" else "❓"
    with st.chat_message(message["role"], avatar=avatar):
        st.markdown(message["content"])
def generate_chat_responses(chat_completion) -> Generator[str, None, None]:
    """Yield chat response content from the Groq API streaming response."""
    for chunk in chat_completion:
        if chunk.choices[0].delta.content:
            yield chunk.choices[0].delta.content
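# st.write_stream consumes a generator like the one above, rendering each
# chunk as it arrives and returning the accumulated response once the
# stream is exhausted.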
if prompt := st.chat_input("Enter your prompt here..."):
    st.session_state.messages.append({"role": "user", "content": prompt})

    with st.chat_message("user", avatar="❓"):
        st.markdown(prompt)
    # Fetch response from Groq API
    full_response = None  # so the fallback below is safe if the request fails
    try:
        chat_completion = client.chat.completions.create(
            model=model_option,
            messages=[
                {"role": m["role"], "content": m["content"]}
                for m in st.session_state.messages
            ],
            max_tokens=max_tokens,
            stream=True,
        )

        # Use the generator function with st.write_stream
        with st.chat_message("assistant", avatar="🧠"):
            chat_responses_generator = generate_chat_responses(chat_completion)
            full_response = st.write_stream(chat_responses_generator)
    except Exception as e:
        st.error(e, icon="🚨")
    # Append the full response to session_state.messages
    if isinstance(full_response, str):
        st.session_state.messages.append(
            {"role": "assistant", "content": full_response}
        )
    elif full_response is not None:
        # st.write_stream can also return a list of rendered items;
        # join them into a single string before storing.
        combined_response = "\n".join(str(item) for item in full_response)
        st.session_state.messages.append(
            {"role": "assistant", "content": combined_response}
        )