#-----Import Required Libraries-----#
import os
from operator import itemgetter

import chainlit as cl
import openai
import tiktoken
from dotenv import load_dotenv
from langchain.prompts import ChatPromptTemplate
from langchain.schema.runnable import RunnablePassthrough
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import PyMuPDFLoader
from langchain_community.vectorstores import Qdrant
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

# Load environment variables from the .env file
load_dotenv()

OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

# Initialize OpenAI
openai.api_key = OPENAI_API_KEY

# Load embedding model
embeddings = OpenAIEmbeddings(model="text-embedding-3-small")
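
# Illustrative embedding call (not part of the app flow): text-embedding-3-small
# returns 1536-dimensional vectors by default.
# vec = embeddings.embed_query("hello")
# len(vec)  # -> 1536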

# Load the source document (Airbnb's 10-K filing)
loader = PyMuPDFLoader("./data/Airbnb-10k.pdf")
documents = loader.load()

def tiktoken_len(text):
    """Return the number of tokens in `text` using gpt-4o's tokenizer."""
    tokens = tiktoken.encoding_for_model("gpt-4o").encode(text)
    return len(tokens)
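
# Illustrative usage (hypothetical input string):
# tiktoken_len("How many nights were booked in 2023?")  # -> token count under gpt-4o's encoding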

text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=500,
    chunk_overlap=100,
    length_function=tiktoken_len,
)

split_documents = text_splitter.split_documents(documents)
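
# Optional sanity check (illustrative, not required by the pipeline): confirm the
# split produced chunks and that every chunk respects the 500-token budget.
# print(len(split_documents))
# print(max(tiktoken_len(doc.page_content) for doc in split_documents))  # should be <= 500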

# Create an in-memory Qdrant vector store (":memory:" runs in-process; data is lost on restart)
qdrant_vector_store = Qdrant.from_documents(
    split_documents,
    embeddings,
    location=":memory:",
    collection_name="Airbnb-10k",
)

# Create a Retriever
retriever = qdrant_vector_store.as_retriever()
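
# Illustrative retriever call (the query text is a made-up example). Retrievers are
# LCEL runnables, so .invoke() returns the most similar Document chunks:
# docs = retriever.invoke("What are Airbnb's primary revenue streams?")
# for doc in docs:
#     print(doc.metadata.get("page"), doc.page_content[:80])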

# -- AUGMENTED -- #
"""
1. Define a String Template
2. Create a Prompt Template from the String Template
"""
### 1. DEFINE STRING TEMPLATE
RAG_PROMPT_TEMPLATE = """\
<|start_header_id|>system<|end_header_id|>
You are a helpful assistant. You answer user questions based on provided context. \
If you can't answer the question with the provided context, say you don't know.<|eot_id|>
<|start_header_id|>user<|end_header_id|>
User Query:
{question}
Context:
{context}<|eot_id|>
<|start_header_id|>assistant<|end_header_id|>
"""
# Note: the template ends with the assistant header and no closing <|eot_id|>, since
# the response has not been generated yet. (The <|...|> markers are Llama-3-style
# header tags, passed to gpt-4o as literal text.) The {question} and {context}
# placeholders must match the keys produced by the chain below.

rag_prompt = ChatPromptTemplate.from_template(RAG_PROMPT_TEMPLATE)
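
# Illustrative check (not part of the app flow): fill the template with dummy
# values to inspect the exact prompt text the model will receive.
# rag_prompt.format_messages(
#     question="What is Airbnb?",
#     context="<retrieved chunks would go here>",
# )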


# Define the LLM
llm = ChatOpenAI(model="gpt-4o")

#-----Creating a Retrieval Augmented Generation (RAG) Chain-----#
# The RAG chain:
# (1) Takes the user question and retrieves relevant context,
# (2) Passes the context through unchanged,
# (3) Formats the prompt with context and question, then sends it to the LLM to generate a response

retrieval_augmented_qa_chain = (
    # INVOKE CHAIN WITH: {"question" : "<>"}
    # "question" : populated by getting the value of the "question" key
    # "context"  : populated by piping the "question" value into the retriever
    {"context": itemgetter("question") | retriever, "question": itemgetter("question")}
    # Pass "context" through unchanged so the retrieved documents remain available
    # to later steps (and can be returned alongside the response)
    | RunnablePassthrough.assign(context=itemgetter("context"))
    # "response" : the "context" and "question" values format the prompt, which is
    #              piped into the LLM; the result is stored under "response"
    # "context"  : populated by getting the value of the "context" key from the previous step
    | {"response": rag_prompt | llm, "context": itemgetter("context")}
)
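
# Illustrative standalone invocation (the question text is a made-up example):
# result = retrieval_augmented_qa_chain.invoke({"question": "How does Airbnb generate revenue?"})
# result["response"].content  # the generated answer (an AIMessage)
# result["context"]           # the retrieved Document objects used as context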

# Sets initial chat settings at the start of a user session
@cl.on_chat_start
async def start_chat():
    """
    Called at the start of every user session.
    Stores default model settings in the user session, a per-session dictionary
    kept in server memory. (The RAG chain itself is built once at module load.)
    """
    settings = {
        "model": "gpt-4o",
        "temperature": 0,
        "max_tokens": 500,
        "frequency_penalty": 0,
        "top_p": 1,
    }
    cl.user_session.set("settings", settings)

# Processes incoming messages from the user and sends a response through a series of steps:
# (1) Retrieves the user's settings
# (2) Invokes the RAG chain with the user's message
# (3) Extracts the content from the response and sends it back to the user

@cl.on_message
async def handle_message(message: cl.Message):
    # Settings are stored at chat start and kept here for reference; the chain's
    # LLM parameters are fixed at module load and are not reconfigured per message
    settings = cl.user_session.get("settings")

    # Use the async API so the Chainlit event loop is not blocked
    response = await retrieval_augmented_qa_chain.ainvoke({"question": message.content})

    # Extract and send just the response content
    content = response["response"].content
    pretty_content = content.strip()

    await cl.Message(content=pretty_content).send()
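
# To launch the app locally (assuming this file is saved as app.py):
#   chainlit run app.py -w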