jaothan committed on
Commit
6f93825
·
verified ·
1 Parent(s): 119bb9d

Upload 3 files

Browse files
chatbot_app_Containerfile ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
# Container image for the Streamlit chatbot UI.
FROM registry.access.redhat.com/ubi9/python-311:1-77.1726664316
WORKDIR /chat
# Copy and install dependencies before the app code so that source-only
# changes do not invalidate the (slow) pip-install layer.
COPY requirements.txt .
# Single RUN keeps the pip upgrade and the dependency install in one layer.
RUN pip install --upgrade pip && \
    pip install --no-cache-dir --upgrade -r /chat/requirements.txt
COPY chatbot_ui.py .
# Streamlit's default serving port.
EXPOSE 8501
ENTRYPOINT [ "streamlit", "run", "chatbot_ui.py" ]
chatbot_app_chatbot_ui.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain_openai import ChatOpenAI
2
+ from langchain.chains import LLMChain
3
+ from langchain_community.callbacks import StreamlitCallbackHandler
4
+ from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
5
+ from langchain.memory import ConversationBufferWindowMemory
6
+ import streamlit as st
7
+ import requests
8
+ import time
9
+ import json
10
+ import os
11
+
12
# Base URL of the model server; the OpenAI-compatible API lives under /v1.
model_service = os.getenv("MODEL_ENDPOINT", "http://localhost:8001")
model_service = f"{model_service}/v1"

# Optional bearer token forwarded on every availability/model request.
model_service_bearer = os.getenv("MODEL_ENDPOINT_BEARER")
request_kwargs = (
    {}
    if model_service_bearer is None
    else {"headers": {"Authorization": f"Bearer {model_service_bearer}"}}
)
19
+
20
@st.cache_resource(show_spinner=False)
def checking_model_service():
    """Block until the configured model service answers, and identify it.

    Polls the OpenAI-compatible ``{model_service}/models`` endpoint
    (llama.cpp server) and the Ollama ``/api/tags`` endpoint once per
    second until one of them responds with HTTP 200.

    Returns:
        str: "Llamacpp_Python" or "Ollama", whichever answered first.
    """
    start = time.time()
    print("Checking Model Service Availability...")
    server = None
    while server is None:
        # Probe each backend independently: the original issued both
        # requests before checking either status, so a reachable
        # llama.cpp server could never be detected while the Ollama
        # probe kept raising (the bare except swallowed everything).
        try:
            if requests.get(f'{model_service}/models', **request_kwargs).status_code == 200:
                server = "Llamacpp_Python"
        except requests.exceptions.RequestException:
            pass
        if server is None:
            try:
                # model_service ends in "/v1"; dropping the last two chars
                # leaves the base URL (trailing slash) for the Ollama API.
                if requests.get(f'{model_service[:-2]}api/tags', **request_kwargs).status_code == 200:
                    server = "Ollama"
            except requests.exceptions.RequestException:
                pass
        if server is None:
            # Only wait when neither backend answered; the original also
            # slept one extra second after a successful detection.
            time.sleep(1)
    print(f"{server} Model Service Available")
    print(f"{time.time()-start} seconds")
    return server
41
+
42
def get_models():
    """Return the model names advertised by an Ollama server.

    Queries the Ollama ``/api/tags`` endpoint and strips the ":tag"
    suffix from each name (e.g. "llama2:latest" -> "llama2").

    Returns:
        list[str] | None: model names, or None when the endpoint is
        unreachable or the payload is not the expected shape.
    """
    try:
        response = requests.get(f"{model_service[:-2]}api/tags", **request_kwargs)
        # response.json() is equivalent to json.loads(response.content)
        # but also honors the response's declared encoding.
        return [tag["name"].split(":")[0] for tag in response.json()["models"]]
    # Narrowed from a bare except: network failures, non-JSON bodies,
    # and missing keys are the expected failure modes here.
    except (requests.exceptions.RequestException, ValueError, KeyError, TypeError):
        return None
49
+
50
# Block app startup (with a spinner) until the model service is reachable.
with st.spinner("Checking Model Service Availability..."):
    server = checking_model_service()
52
+
53
def enableInput():
    """Re-enable the chat input box (called once a response completes)."""
    st.session_state["input_disabled"] = False

def disableInput():
    """Disable the chat input box while a response is being generated."""
    st.session_state["input_disabled"] = True
58
+
59
st.title("💬 Chatbot")
# Seed the conversation with a greeting on the first page load;
# session_state survives Streamlit's reruns.
if "messages" not in st.session_state:
    st.session_state["messages"] = [{"role": "assistant",
                                     "content": "How can I help you?"}]
if "input_disabled" not in st.session_state:
    enableInput()

# Replay the stored conversation so the history is visible after reruns.
for msg in st.session_state.messages:
    st.chat_message(msg["role"]).write(msg["content"])
68
+
69
@st.cache_resource()
def memory():
    """Return the app-wide conversation memory (last 3 exchanges).

    Cached with st.cache_resource so every Streamlit rerun shares the
    same buffer instance.
    """
    return ConversationBufferWindowMemory(return_messages=True, k=3)
73
+
74
model_name = os.getenv("MODEL_NAME", "")

# Ollama can serve several models: let the user pick one in the sidebar.
# llama.cpp serves a single model, so MODEL_NAME (possibly "") is used as-is.
if server == "Ollama":
    models = get_models()
    with st.sidebar:
        model_name = st.radio(label="Select Model",
                              options=models)

# OpenAI-compatible client pointed at the local model service; the
# bearer token doubles as the API key when one is configured.
llm = ChatOpenAI(base_url=model_service,
                 api_key="sk-no-key-required" if model_service_bearer is None else model_service_bearer,
                 model=model_name,
                 streaming=True,
                 callbacks=[StreamlitCallbackHandler(st.empty(),
                                                     expand_new_thoughts=True,
                                                     collapse_completed_thoughts=True)])

prompt = ChatPromptTemplate.from_messages([
    ("system", "You are world class technical advisor."),
    MessagesPlaceholder(variable_name="history"),
    ("user", "{input}")
])

# NOTE(review): LLMChain is deprecated in newer langchain releases but is
# still supported by the langchain==0.2.3 pinned in requirements.txt.
chain = LLMChain(llm=llm,
                 prompt=prompt,
                 verbose=False,
                 memory=memory())

# Walrus target renamed from `prompt` to `user_input`: the original
# shadowed the ChatPromptTemplate bound to `prompt` just above.
if user_input := st.chat_input(disabled=st.session_state["input_disabled"], on_submit=disableInput):
    st.session_state.messages.append({"role": "user", "content": user_input})
    st.chat_message("user").markdown(user_input)
    response = chain.invoke(user_input)
    st.chat_message("assistant").markdown(response["text"])
    st.session_state.messages.append({"role": "assistant", "content": response["text"]})
    enableInput()
    # Rerun so the chat input is re-rendered in its enabled state.
    st.rerun()
chatbot_app_requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ langchain==0.2.3
2
+ langchain-openai==0.1.7
3
+ langchain-community==0.2.4
4
+ streamlit==1.34.0