# HuggingFace Space: PapAI custom RAG chatbot (Gradio UI + LangChain + Qdrant).
import os
import time

import dotenv
import gradio as gr
import qdrant_client
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnablePassthrough
from langchain.vectorstores import Qdrant

from utils import template
# Load environment variables and validate required configuration up front.
dotenv.load_dotenv()

QDRANT_URL = os.getenv("QDRANT_URL")
QDRANT_API_KEY = os.getenv("QDRANT_API_KEY")

# Fail fast: the app cannot reach the vector store without both values.
if not QDRANT_URL or not QDRANT_API_KEY:
    raise ValueError("QDRANT_URL and QDRANT_API_KEY must be set in the environment")
# Initialize the vector store
def initiliaze_vector_store():
    """
    Build and return the Qdrant-backed vector store.

    Uses the module-level QDRANT_URL / QDRANT_API_KEY credentials and
    OpenAI embeddings. Opens a client connection, so only run this once
    at launch.

    Returns:
        Qdrant: a LangChain vector store bound to the remote collection.
    """
    embeddings = OpenAIEmbeddings()
    client = qdrant_client.QdrantClient(url=QDRANT_URL, api_key=QDRANT_API_KEY)
    # NOTE(review): collection name "doc_datategy" is hard-coded — confirm it
    # matches the collection provisioned on the Qdrant instance.
    return Qdrant(client=client, collection_name="doc_datategy", embeddings=embeddings)
# Initialize the retriever
def initiliaze_retriever(vectorstore):
    """
    Create and return a retriever view of the given vector store.

    Args:
        vectorstore: any object exposing ``as_retriever()`` (a LangChain
            vector store in practice).

    Returns:
        The retriever produced by ``vectorstore.as_retriever()``.
    """
    return vectorstore.as_retriever()
# Initialize the chatbot
def initiliaze_chatbot(template, model_name="gpt-3.5-turbo-1106", temperature=0):
    """
    Build the chatbot components: the prompt template and the chat model.

    Args:
        template (str): prompt template string; expected to contain the
            placeholders the RAG chain fills in (context / question).
        model_name (str): OpenAI chat model identifier.
        temperature (float): sampling temperature; 0 keeps answers
            deterministic.

    Returns:
        tuple: ``(prompt, llm)`` — a ChatPromptTemplate and a ChatOpenAI
        instance, ready to be piped into a chain.
    """
    prompt = ChatPromptTemplate.from_template(template)
    llm = ChatOpenAI(model_name=model_name, temperature=temperature)
    return prompt, llm
# Initialize the RAG chain
def initiliaze_RAG(retriever, prompt, llm):
    """
    Compose a retrieval-augmented-generation chain with LCEL piping.

    The incoming question is passed through unchanged while the retriever
    supplies the context; both feed the prompt, then the model, then a
    plain-string output parser.

    Args:
        retriever: LangChain retriever supplying context documents.
        prompt: ChatPromptTemplate with ``context`` and ``question`` slots.
        llm: chat model runnable.

    Returns:
        A runnable chain whose ``invoke(query)`` returns a string answer.
    """
    inputs = {"context": retriever, "question": RunnablePassthrough()}
    return inputs | prompt | llm | StrOutputParser()
# Launch Gradio app: build the store/retriever once, then wire up the UI.
vectorstore = initiliaze_vector_store()
retriever = initiliaze_retriever(vectorstore)

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(label="PapAI custom chatbot")
    msg = gr.Textbox(label="Prompt", value='PapAI?', interactive=True)
    clear = gr.Button("Clear")
    # The prompt template is user-editable; it is re-read on every message.
    template_user = gr.Textbox(label="Template", value=template, interactive=True)

    def change_template(template_user_str):
        """Rebuild the RAG chain from the current template text."""
        prompt, llm = initiliaze_chatbot(template_user_str)
        return initiliaze_RAG(retriever, prompt, llm)

    def RAG_answer(query, chat_history, template_user_str):
        """Answer one user query and append the turn to the chat history."""
        # Rebuilt per message so edits in the Template box take effect
        # immediately (at the cost of re-creating prompt/llm each turn).
        rag_chain = change_template(template_user_str)
        answer = rag_chain.invoke(query)
        chat_history.append((query, answer))
        time.sleep(1.3)  # Consider optimizing or dynamic handling
        return "", chat_history

    msg.submit(RAG_answer, [msg, chatbot, template_user], [msg, chatbot])
    # Bug fix: the Clear button was created but never wired to a handler,
    # so clicking it did nothing. Reset the chatbot history on click.
    clear.click(lambda: None, None, chatbot, queue=False)

demo.queue()
demo.launch(share=False, debug=True)