import streamlit as st
from llama_cpp import Llama

st.set_page_config(page_title="Chat with AI", page_icon="🤖")

# Custom CSS for better styling
st.markdown("""
""", unsafe_allow_html=True)


@st.cache_resource
def load_model():
    # Cached so the GGUF weights are downloaded and loaded only once per session.
    return Llama.from_pretrained(
        repo_id="Mykes/med_phi3-mini-4k-GGUF",
        filename="*Q4_K_M.gguf",  # match the Q4_K_M quantization in the repo
        verbose=False,
        n_ctx=256,      # small context window; raise it for longer exchanges
        n_batch=256,
        n_threads=4,
    )


llm = load_model()

# Simple question/answer template the user input is wrapped in
basic_prompt = "Q: {question}\nA:"

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# React to user input
if prompt := st.chat_input("What is your question?"):
    # Display user message in chat message container
    st.chat_message("user").markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

    model_input = basic_prompt.format(question=prompt)

    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""
        # Stream the completion chunk by chunk. echo=False keeps the prompt
        # itself out of the displayed answer, and stopping on "Q:" keeps the
        # model from generating a follow-up question (the original stop string
        # appears to have been lost, so this one is an assumption).
        for chunk in llm(
            model_input,
            max_tokens=None,  # generate until a stop string or the context fills
            stop=["Q:"],
            echo=False,
            stream=True,
        ):
            full_response += chunk["choices"][0]["text"]
            message_placeholder.markdown(full_response + "▌")
        message_placeholder.markdown(full_response)
    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": full_response})

st.sidebar.title("Chat with AI")
st.sidebar.markdown("This is a simple chat interface using Streamlit and an AI model.")
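# Usage sketch (assumptions: the script is saved as app.py, and the
# dependencies are installed via `pip install streamlit llama-cpp-python
# huggingface-hub` -- huggingface_hub is what Llama.from_pretrained uses
# to fetch the GGUF file). Launch the app with:
#
#   streamlit run app.py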