"""Streamlit chat UI backed by Groq's streaming chat-completions API.

Supports an optional image upload, which is attached to the latest user
message as a base64 data URL and sent to the selected vision-capable
Llama model. Chat history (text only) persists across Streamlit reruns
via ``st.session_state``.
"""
import streamlit as st
from groq import Groq
import os
import base64
from io import BytesIO  # NOTE(review): currently unused — confirm before removing

# Groq client; GROQ_API_KEY must be set in the environment.
client = Groq(
    api_key=os.environ.get("GROQ_API_KEY"),
)

# Initialize session state for chat history if it doesn't exist.
if 'messages' not in st.session_state:
    st.session_state.messages = []


def encode_image(uploaded_file):
    """Return the uploaded image as a base64 data URL.

    Args:
        uploaded_file: a Streamlit ``UploadedFile`` (has ``getvalue()``
            and a MIME ``type`` attribute like ``"image/png"``).

    Returns:
        A string of the form ``data:image/<subtype>;base64,<payload>``.
    """
    bytes_data = uploaded_file.getvalue()
    base64_image = base64.b64encode(bytes_data).decode('utf-8')
    # uploaded_file.type is a MIME type such as "image/png"; keep the subtype.
    return f"data:image/{uploaded_file.type.split('/')[-1]};base64,{base64_image}"


def generate_response(messages, model, current_image=None):
    """Yield assistant-response text chunks streamed from the Groq API.

    Args:
        messages: chat history as ``[{"role": ..., "content": str}, ...]``.
        model: name of the Groq model to query.
        current_image: optional base64 data URL; when given, it is merged
            into the last user message as multimodal (text + image) content.

    Yields:
        Non-empty text fragments of the assistant's reply, in order.
    """
    # Copy so the session-state history keeps its text-only content;
    # the multimodal payload below replaces an element of the copy only.
    api_messages = messages.copy()

    if current_image:
        api_messages[-1] = {
            "role": "user",
            "content": [
                {"type": "text", "text": api_messages[-1]["content"]},
                {
                    "type": "image_url",
                    "image_url": {"url": current_image}
                }
            ]
        }
        # Fix: the original forced model = "llama-3.2-90b-vision-preview"
        # here, silently discarding the user's selectbox choice. Both
        # selectable models are vision-capable, so the selection is honored.

    stream = client.chat.completions.create(
        model=model,
        messages=api_messages,
        temperature=0.1,
        top_p=1,
        stream=True,
        stop=None,
    )
    for chunk in stream:
        content = chunk.choices[0].delta.content
        # Skip empty/None deltas (e.g. the role-only first chunk).
        if content:
            yield content


st.title("Fast Inference Chat")

model = st.selectbox(
    "Select a model",
    ["llama-3.2-90b-vision-preview", "llama-3.2-11b-vision-preview"],
    index=0,
)

# Replay chat history on each Streamlit rerun.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# File uploader for images.
uploaded_file = st.file_uploader("Upload an image (optional)", type=['png', 'jpg', 'jpeg'])

# Get user input.
user_input = st.chat_input('Message to Assistant...', key='prompt_input')

if user_input:
    # Add user message to chat history (text only — images are not persisted).
    st.session_state.messages.append({"role": "user", "content": user_input})

    # Display user message, with the image if one was uploaded.
    with st.chat_message("user"):
        st.markdown(user_input)
        if uploaded_file:
            st.image(uploaded_file)

    # Generate and display the assistant response.
    with st.chat_message("assistant"):
        response_placeholder = st.empty()
        full_response = ""

        # Prepare the image payload for the API call, if present.
        current_image = encode_image(uploaded_file) if uploaded_file else None

        # Stream the response, updating the placeholder as chunks arrive.
        with st.spinner("Generating response..."):
            for content in generate_response(st.session_state.messages, model, current_image):
                full_response += content
                response_placeholder.markdown(full_response + "▌")  # typing cursor
        response_placeholder.markdown(full_response)

        # Add assistant response to chat history (text only).
        st.session_state.messages.append({"role": "assistant", "content": full_response})