Mykes committed on
Commit
bb43e92
•
1 Parent(s): 70e229a

Upload app_interface_working.py

Files changed (1)
  1. app_interface_working.py +91 -0
app_interface_working.py ADDED
@@ -0,0 +1,91 @@
+ import streamlit as st
+ from llama_cpp import Llama
+
+ st.set_page_config(page_title="Chat with AI", page_icon="🤖")
+
+ # Custom CSS for better styling
+ st.markdown("""
+ <style>
+ .stTextInput > div > div > input {
+     background-color: #f0f2f6;
+ }
+ .chat-message {
+     padding: 1.5rem; border-radius: 0.5rem; margin-bottom: 1rem; display: flex
+ }
+ .chat-message.user {
+     background-color: #2b313e
+ }
+ .chat-message.bot {
+     background-color: #475063
+ }
+ .chat-message .avatar {
+     width: 20%;
+ }
+ .chat-message .avatar img {
+     max-width: 78px;
+     max-height: 78px;
+     border-radius: 50%;
+     object-fit: cover;
+ }
+ .chat-message .message {
+     width: 80%;
+     padding: 0 1.5rem;
+     color: #fff;
+ }
+ </style>
+ """, unsafe_allow_html=True)
+
+ @st.cache_resource
+ def load_model():
+     return Llama.from_pretrained(
+         repo_id="Mykes/med_phi3-mini-4k-GGUF",
+         filename="*Q4_K_M.gguf",
+         verbose=False,
+         n_ctx=256,
+         n_batch=256,
+         n_threads=4
+     )
+
+ llm = load_model()
+
+ basic_prompt = "Q: {question}\nA:"
+
+ # Initialize chat history
+ if "messages" not in st.session_state:
+     st.session_state.messages = []
+
+ # Display chat messages from history on app rerun
+ for message in st.session_state.messages:
+     with st.chat_message(message["role"]):
+         st.markdown(message["content"])
+
+ # React to user input
+ if prompt := st.chat_input("What is your question?"):
+     # Display user message in chat message container
+     st.chat_message("user").markdown(prompt)
+     # Add user message to chat history
+     st.session_state.messages.append({"role": "user", "content": prompt})
+
+     model_input = basic_prompt.format(question=prompt)
+
+     # Display assistant response in chat message container
+     with st.chat_message("assistant"):
+         message_placeholder = st.empty()
+         full_response = ""
+
+         for token in llm(
+             model_input,
+             max_tokens=None,
+             stop=["<end_of_turn>"],
+             echo=False,  # echo=False keeps the "Q: ... A:" prompt out of the displayed answer
+             stream=True
+         ):
+             full_response += token['choices'][0]['text']
+             message_placeholder.markdown(full_response + "▌")
+         message_placeholder.markdown(full_response)
+
+     # Add assistant response to chat history
+     st.session_state.messages.append({"role": "assistant", "content": full_response})
+
+ st.sidebar.title("Chat with AI")
+ st.sidebar.markdown("This is a simple chat interface using Streamlit and an AI model.")
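The same model call can be exercised outside Streamlit. Below is a minimal sketch, assuming llama-cpp-python and huggingface_hub are installed; the repo_id, filename, stop string, and "Q: ... A:" prompt template are taken from the file above, while the example question and max_tokens value are placeholders.

from llama_cpp import Llama

# Same quantized model the app loads (downloaded from the Hugging Face Hub)
llm = Llama.from_pretrained(
    repo_id="Mykes/med_phi3-mini-4k-GGUF",
    filename="*Q4_K_M.gguf",
    verbose=False,
    n_ctx=256,
)

# Hypothetical question, formatted with the app's "Q: ... A:" template
prompt = "Q: {question}\nA:".format(question="What are common symptoms of anemia?")

# Stream tokens the same way the app does and print them as they arrive
for chunk in llm(prompt, max_tokens=128, stop=["<end_of_turn>"], stream=True):
    print(chunk["choices"][0]["text"], end="", flush=True)
print()

To run the app itself, install the dependencies (pip install streamlit llama-cpp-python huggingface_hub) and launch it with: streamlit run app_interface_working.py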