import streamlit as st
from openai import OpenAI
import os
from dotenv import load_dotenv
import random

# Load environment variables
load_dotenv()
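# (A token stored in a local .env file is loaded into the environment here;
# initialize_client() reads HUGGINGFACEHUB_API_TOKEN from it below.)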

# Constants
MAX_TOKENS = 4000
DEFAULT_TEMPERATURE = 0.5


# Initialize the client
def initialize_client():
    api_key = os.environ.get('HUGGINGFACEHUB_API_TOKEN')
    if not api_key:
        st.error("HUGGINGFACEHUB_API_TOKEN not found in environment variables.")
        st.stop()
    return OpenAI(
        base_url="https://api-inference.huggingface.co/v1",
        api_key=api_key
    )
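# (initialize_client uses the OpenAI SDK purely as a client library; base_url
# points it at Hugging Face's OpenAI-compatible Inference API instead of
# api.openai.com.)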

# Supported models
model_links = {
    "Meta-Llama-3.1-8B": "meta-llama/Meta-Llama-3.1-8B-Instruct",
    "Mistral-7B-Instruct-v0.3": "mistralai/Mistral-7B-Instruct-v0.3",
    "Gemma-7b-it": "google/gemma-7b-it",
}
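# (Keys are display names shown in the UI; values are Hugging Face repo IDs
# passed as the `model` parameter when calling the chat completions endpoint.)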

# Model information including logos
model_info = {
    "Meta-Llama-3.1-8B": {
        'description': """The Llama 3.1 model is a **Large Language Model (LLM)** built for question-and-answer interactions.
        \nIt was created by the [**Meta AI**](https://llama.meta.com/) team and has over **8 billion parameters.**\n""",
        "logo": "llama_logo.gif",
    },
    "Mistral-7B-Instruct-v0.3": {
        'description': """The Mistral-7B-Instruct-v0.3 is an instruction-tuned version of Mistral-7B.
        \nIt was created by [**Mistral AI**](https://mistral.ai/) and has **7 billion parameters.**\n""",
        "logo": "mistrail.jpeg",
    },
    "Gemma-7b-it": {
        'description': """Gemma is a family of lightweight, state-of-the-art open models from Google.
        \nThe 7B-it variant is instruction-tuned and has **7 billion parameters.**\n""",
        "logo": "gemma.jpeg",
    }
}
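# (The logo values are local image files expected to sit alongside this script;
# st.sidebar.image() loads them by relative path in main().)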

# Fallback images shown with the error message
random_dog_images = ["broken_llama3.jpeg"]


def main():
    st.header('Multi-Models')

    # Sidebar for model selection and temperature
    selected_model = st.sidebar.selectbox("Select Model", list(model_links.keys()))
    temperature = st.sidebar.slider('Select a temperature value', 0.0, 1.0, DEFAULT_TEMPERATURE)
    st.markdown(f'_powered_ by ***:violet[{selected_model}]***')

    # Display model info and logo
    st.sidebar.write(f"You're now chatting with **{selected_model}**")
    st.sidebar.markdown(model_info[selected_model]['description'])
    st.sidebar.image(model_info[selected_model]['logo'], use_column_width=True)
    st.sidebar.markdown("*Generated content may be inaccurate or false.*")

    # Initialize chat history
    if "messages" not in st.session_state:
        st.session_state.messages = []
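    # (Streamlit re-runs this script top to bottom on every interaction;
    # st.session_state is what persists the chat history across those reruns.)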

    # Display chat messages from history on app rerun
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    # Initialize client
    client = initialize_client()

    # Chat input and response
    if prompt := st.chat_input("Type message here..."):
        process_user_input(client, prompt, selected_model, temperature)
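

# Handles a single chat turn: echo the user message, send the full history to
# the model, stream the reply into the UI, and record both sides in history.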
def process_user_input(client, prompt, selected_model, temperature):
    # Display user message
    with st.chat_message("user"):
        st.markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Generate and display assistant response
    with st.chat_message("assistant"):
        try:
            stream = client.chat.completions.create(
                model=model_links[selected_model],
                messages=[
                    {"role": m["role"], "content": m["content"]}
                    for m in st.session_state.messages
                ],
                temperature=temperature,
                stream=True,
                max_tokens=MAX_TOKENS,
            )
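            # st.write_stream renders chunks incrementally as they arrive and
            # returns the concatenated response text once the stream finishes.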
            response = st.write_stream(stream)
        except Exception as e:
            handle_error(e)
            return
    st.session_state.messages.append({"role": "assistant", "content": response})


def handle_error(error):
    response = """😵‍💫 Looks like someone unplugged something!
    \n Either the model space is being updated or something is down."""
    st.write(response)
    random_dog_pick = random.choice(random_dog_images)
    st.image(random_dog_pick)
    st.write("This was the error message:")
    st.write(str(error))


if __name__ == "__main__":
    main()
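
# To run locally (assuming this file is saved as app.py, the usual entry point
# for a Streamlit Space):
#   export HUGGINGFACEHUB_API_TOKEN=hf_...   # or put the token in a .env file
#   streamlit run app.py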