import gradio as gr
from langchain_community.chat_models import ChatOpenAI
from langchain.memory import ConversationSummaryBufferMemory
from langchain.chains import ConversationalRetrievalChain
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import FAISS
from langchain.prompts import PromptTemplate
from langchain.schema import HumanMessage, SystemMessage
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from operator import itemgetter
from langfuse.callback import CallbackHandler
import os
from dotenv import load_dotenv

load_dotenv()
openai_api_key = os.environ['OPENAI_API_KEY']
openai_model = "gpt-3.5-turbo"  # "gpt-3" is not a valid API model name
def load_documents(file_paths):
    """Load each PDF and collect its pages as LangChain documents."""
    docs = []
    for file_path in file_paths:
        loader = PyPDFLoader(file_path)
        docs.extend(loader.load())
    return docs
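
# Note: PyPDFLoader emits one Document per page, so `docs` is page-level, with
# the filename and page number in each document's metadata.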
def split_docs(docs):
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=500,
        chunk_overlap=50)
    return text_splitter.split_documents(docs)
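
# Illustrative sanity check: with chunk_size=500 and chunk_overlap=50, the
# 50-character overlap preserves context across chunk boundaries, e.g.
#   chunks = split_docs(docs)
#   print(len(chunks), len(chunks[0].page_content))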
def create_embeddings(docs, openai_api_key):
    embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)
    vectordb = FAISS.from_documents(docs, embeddings)
    return vectordb
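
# FAISS here builds an in-memory index, so embeddings are recomputed on every
# app start; FAISS.save_local / FAISS.load_local could persist the index.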
def create_retriever(vectordb):
    retriever = vectordb.as_retriever(search_type='mmr')
    return retriever
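
# 'mmr' (maximal marginal relevance) trades relevance against diversity among
# the returned chunks; it can be tuned via search_kwargs, e.g.
#   vectordb.as_retriever(search_type='mmr', search_kwargs={'k': 4, 'fetch_k': 20})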
def create_memory():
    memory = ConversationSummaryBufferMemory(
        memory_key='chat_history',
        output_key='answer',
        llm=ChatOpenAI(model_name=openai_model, temperature=0.7, openai_api_key=openai_api_key),
        return_messages=True
    )
    return memory
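
# ConversationSummaryBufferMemory keeps recent turns verbatim and uses the LLM
# to summarize older ones once the buffer exceeds its token budget
# (max_token_limit, 2000 tokens by default).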
def create_qa_chain(file_paths, openai_model, openai_api_key):
    # Load PDF documents and split them into retrieval-sized chunks
    docs = load_documents(file_paths)
    chunks = split_docs(docs)
    # Create embeddings and the vector store
    vectordb = create_embeddings(chunks, openai_api_key)
    # Create retriever
    retriever = create_retriever(vectordb)
    # Create memory
    memory = create_memory()
    # Set up the LLM and QA chain
    llm_defined = ChatOpenAI(model_name=openai_model, temperature=0.7, openai_api_key=openai_api_key)
    qa_chain = ConversationalRetrievalChain.from_llm(
        llm=llm_defined,
        retriever=retriever,
        memory=memory,
        return_source_documents=True,
        verbose=True,
        callbacks=[langfuse_handler]
    )
    return qa_chain
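
# Usage sketch: the chain takes a dict with a 'question' key and returns a
# dict containing 'answer' and 'source_documents', e.g.
#   result = qa_chain.invoke({"question": "Where is the tote tracking menu?"})
#   print(result["answer"], len(result["source_documents"]))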
langfuse_handler = CallbackHandler(
    public_key=os.environ['LANGFUSE_PUBLIC_KEY'],
    trace_name='WareHouseAsst',
    secret_key=os.environ['LANGFUSE_SECRET_KEY'],
    host=os.environ["LANGFUSE_HOST_SELF"],
    session_id="1123",
    release="1.0.0",
    user_id="Rish",
    debug=True
)
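
# The handler is passed as a callback to the chain, so every run (LLM calls,
# retrievals) is traced to the self-hosted Langfuse instance configured above.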
if __name__ == "__main__":
    # Define PDF file paths
    pdf_files = [
        r"Advantage Dashboard Functional Description.pdf",
        r"user_guide.pdf",
        r"data_AADData_test_big_krish.pdf",
        r"data_SOPs for Warehouse Managers_krish.pdf"
    ]
    # Set up the QA chain
    qa_chain = create_qa_chain(pdf_files, openai_model, openai_api_key)
    template = "You are an agent responsible for answering questions from your knowledge base.\n\nQuestion: {question}"
    prompt = PromptTemplate.from_template(template)
    memory = ConversationSummaryBufferMemory(
        memory_key='history',
        return_messages=True,
        llm=ChatOpenAI(model_name=openai_model, temperature=0.7, openai_api_key=openai_api_key)
    )
    llm = ChatOpenAI(model_name=openai_model, temperature=0.7, openai_api_key=openai_api_key)
    def process_qa_chain_output(output):
        answer = str(output["answer"])  # Convert the answer to a string
        return answer
    conversation_chain = (
        RunnablePassthrough.assign(
            history=RunnableLambda(memory.load_memory_variables) | itemgetter("history")
        )
        | prompt
        | llm
    )
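
    # LCEL sketch: memory.load_memory_variables returns {'history': [...]}, so
    # the assign step injects past turns before the prompt and LLM run. Unlike
    # the QA chain above, this pipeline does not persist turns automatically;
    # memory.save_context({"question": q}, {"output": a}) would be needed after
    # each call. This chain is currently unused by the UI below.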
    with gr.Blocks() as demo:
        with gr.Row():
            gr.Markdown("<h1 style='text-align: center; width: 100%;'>Warehouse Assistant</h1>")
        # Use Row to align elements horizontally
        with gr.Row():
            # Left-most column for the product guide
            with gr.Column(scale=1):
                gr.Markdown("**Here's how to use this demo:**")
                gr.Markdown("""This is a conversational agent that works for the warehouse.\n
                Ask me whatever you want about it, mate!!""")
            # Right section for the examples and chat interface
            with gr.Column(scale=3):
                ex = [
                    "Where can we find the tote tracking menu?",
                    "What are some key shortcuts for the quantity screen?",
                    "What are the steps to process an ASN?",
                    "Where is the Hold/Release menu?",
                    "Tell me about SOP-DMS006: Proactive Delay Management",
                    "What is label picking?",
                    "What is t_cartonize_results?"
                ]
                chatbot = gr.Chatbot()
                msg = gr.Textbox(label="Query:", placeholder="Enter your query here")
                gr.Examples(ex, msg)
                clear = gr.Button("Clear Chat History")
        def user(user_message, history):
            """
            Process a user message and update the conversation history.

            Parameters:
            - user_message (str): The message from the user.
            - history (list): The conversation history.

            Returns:
            - tuple: A tuple containing an empty string (clearing the textbox)
              and the updated conversation history.
            """
            return "", history + [[user_message, None]]
        def ask_general_question(question):
            """
            Ask a general question directly to a GPT model.
            """
            fallback_llm = ChatOpenAI(model_name="gpt-4o", temperature=0.7, openai_api_key=openai_api_key)
            response = fallback_llm.invoke([
                SystemMessage(content="You are a helpful assistant."),
                HumanMessage(content=question)
            ])
            return response.content
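
        # The fallback deliberately uses a stronger model ("gpt-4o") than the
        # retrieval chain; `.invoke` on a LangChain chat model takes a list of
        # message objects and returns an AIMessage whose text is `.content`.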
        def bot(history):
            """Generate a response for the latest turn in the conversation.

            This function runs the retrieval QA chain on the most recent user
            message and writes the answer back into the history. If the chain
            cannot produce a useful answer, it falls back to a general GPT model.

            Args:
                history (list): A list containing the conversation history.

            Returns:
                list: The conversation history with the generated response filled in.
            """
            if history and history[-1][0]:
                user_input = history[-1][0]
                inputs = {"question": user_input}
                output = qa_chain.invoke(inputs)
                answer = process_qa_chain_output(output)
                history[-1][1] = answer
                # Check if the answer is adequate; here you can define what 'adequate' means
                if not answer or answer in ['No relevant information found', "I don't know"]:
                    # Fall back to the general GPT model
                    answer = ask_general_question(user_input)
                    history[-1][1] = answer
            return history
        msg.submit(
            user,
            [msg, chatbot],
            [msg, chatbot],
            queue=False
        ).then(
            bot,
            chatbot,
            chatbot,
            queue=False
        )
        def clear_history():
            # Clear both the chain's conversational memory and the visible chat
            qa_chain.memory.clear()
            memory.clear()
            return None

        clear.click(fn=clear_history, inputs=None, outputs=chatbot, queue=False)
    demo.queue()
    demo.launch()