from langchain.chains import ConversationChain
from langchain.chains.conversation.memory import ConversationBufferWindowMemory
from langchain.prompts import (
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate,
    ChatPromptTemplate,
    MessagesPlaceholder,
)
import streamlit as st
from utils import find_match
from langchain_community.llms import HuggingFaceHub
import os
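
# `utils` is expected to provide find_match(query) -> str, the retrieval helper used
# below to fetch context for each question. Its implementation is not shown in this
# section; a minimal sketch, assuming a SentenceTransformer encoder and a Pinecone
# index (the model name, index name, and metadata layout are hypothetical), might be:
#
#     from sentence_transformers import SentenceTransformer
#     from pinecone import Pinecone
#
#     encoder = SentenceTransformer("all-MiniLM-L6-v2")
#     index = Pinecone(api_key=os.environ["PINECONE_API_KEY"]).Index("dslogic")
#
#     def find_match(query):
#         embedding = encoder.encode(query).tolist()
#         result = index.query(vector=embedding, top_k=2, include_metadata=True)
#         return "\n".join(m["metadata"]["text"] for m in result["matches"])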

# The Hugging Face API token must be set in the environment before launching the app
# (e.g. `export HF_API_TOKEN=hf_...`); os.environ raises a KeyError if it is missing.
huggingfacehub_api_token = os.environ['HF_API_TOKEN']

st.subheader("DSlogic")

repo_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"
llm = HuggingFaceHub(
    huggingfacehub_api_token=huggingfacehub_api_token,
    repo_id=repo_id,
    model_kwargs={"temperature": 0.5, "max_new_tokens": 250},
)
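
# Windowed conversation memory: keep only the last k=3 exchanges in the prompt so the
# context stays small; return_messages=True returns chat Message objects, which is
# what the MessagesPlaceholder below expects.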
if 'buffer_memory' not in st.session_state:
    st.session_state.buffer_memory = ConversationBufferWindowMemory(k=3, return_messages=True)
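
# Prompt layout: system instructions first, then the remembered history, then the
# user's new input.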
system_msg_template = SystemMessagePromptTemplate.from_template(template="""You are a Data Science expert and are helping data scientists. Answer the question as truthfully as possible using the provided context,
and if the answer is not contained within the text below or in your knowledge, say 'I don't know'. Answer in steps if possible.""")

human_msg_template = HumanMessagePromptTemplate.from_template(template="{input}")

prompt_template = ChatPromptTemplate.from_messages(
    [system_msg_template, MessagesPlaceholder(variable_name="history"), human_msg_template]
)
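
# variable_name="history" matches ConversationBufferWindowMemory's default memory_key,
# so the remembered messages are injected between the system and human messages.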

conversation = ConversationChain(memory=st.session_state.buffer_memory, prompt=prompt_template, llm=llm, verbose=True)
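
# The chain reads from and writes to buffer_memory on every call, so recent history
# reaches the model automatically; verbose=True prints each full prompt to the
# terminal, which helps with debugging.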

# Seed the display history with the assistant's greeting on first load.
if "messages" not in st.session_state:
    st.session_state.messages = [{"role": "assistant", "content": "How can I assist you?"}]
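
# Streamlit reruns this script top to bottom on every interaction, so replay the
# stored conversation before handling new input.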
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

if prompt := st.chat_input("What is up?"):
    # Record and echo the user's message.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # The assistant block must stay inside the `if prompt` branch; otherwise it would
    # run on every rerun and fail on first load, when the history is still empty.
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        query = st.session_state.messages[-1]["content"]
        # Retrieve supporting context for the query (find_match comes from utils).
        context = find_match(query)
        # conversation.predict returns the complete answer as a single string, so
        # iterating over it yields one character at a time; the loop below only
        # simulates token streaming.
        full_response = ""
        for chunk in conversation.predict(input=f"Context:'{context}' \n\n Query:'{query}'"):
            full_response += chunk
            message_placeholder.markdown(full_response + "▌")
        message_placeholder.markdown(full_response)
        st.session_state.messages.append({"role": "assistant", "content": full_response})
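
# Launch with `streamlit run <this_file>.py` (the script's actual filename is not
# shown in this section).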