import os
import asyncio

from dotenv import load_dotenv
from getpass import getpass

import openai  # OpenAI client; used here to set the API key
import chainlit as cl  # Chainlit provides the chat UI and session hooks

from aimakerspace.text_utils import TextFileLoader, CharacterTextSplitter
from aimakerspace.vectordatabase import VectorDatabase
from aimakerspace.openai_utils.prompts import (
    UserRolePrompt,
    SystemRolePrompt,
)
from aimakerspace.openai_utils.chatmodel import ChatOpenAI


load_dotenv()

# Prefer the key from .env / the environment; fall back to an interactive
# prompt so the app still starts when no .env file is present.
openai.api_key = os.environ.get("OPENAI_API_KEY") or getpass("OpenAI API Key: ")
os.environ["OPENAI_API_KEY"] = openai.api_key


def load(filename):
    """Load a text file into a list of document strings."""
    text_loader = TextFileLoader(filename)
    return text_loader.load_documents()


model_name = "gpt-4"
filename = "data/KingLear.txt"

# Build the vector database once at startup: load the play, split it into
# embedding-sized chunks, then embed and index the chunks asynchronously.
documents = load(filename)
split_documents = CharacterTextSplitter().split_texts(documents)
vector_db = VectorDatabase()
vector_db = asyncio.run(vector_db.abuild_from_list(split_documents))

chat_openai = ChatOpenAI(model_name=model_name)
RAQA_PROMPT_TEMPLATE = """
Use the provided context to answer the user's query.

You may not answer the user's query unless there is specific context in the following text.

If you do not know the answer, or cannot answer, please respond with "I don't know".

Context:
{context}
"""

raqa_prompt = SystemRolePrompt(RAQA_PROMPT_TEMPLATE)

USER_PROMPT_TEMPLATE = """
User Query:
{user_query}
"""

user_prompt = UserRolePrompt(USER_PROMPT_TEMPLATE)
class RetrievalAugmentedQAPipeline:
    """Retrieve relevant chunks from the vector DB, then answer with the LLM."""

    def __init__(self, llm: ChatOpenAI, vector_db_retriever: VectorDatabase) -> None:
        self.llm = llm
        self.vector_db_retriever = vector_db_retriever

    def run_pipeline(self, user_query: str) -> str:
        # search_by_text returns (chunk_text, similarity_score) tuples for the k best matches.
        context_list = self.vector_db_retriever.search_by_text(user_query, k=4)

        context_prompt = ""
        for context in context_list:
            context_prompt += context[0] + "\n"

        formatted_system_prompt = raqa_prompt.create_message(context=context_prompt)
        formatted_user_prompt = user_prompt.create_message(user_query=user_query)

        return self.llm.run([formatted_system_prompt, formatted_user_prompt])
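
# The assembled exchange sent to the model looks roughly like this (a sketch;
# the exact dict construction lives inside aimakerspace's prompt classes):
#   [{"role": "system", "content": "Use the provided context ... Context: <chunks>"},
#    {"role": "user", "content": "User Query: <question>"}]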


@cl.on_chat_start  # runs once at the start of each user session
def start_chat():
    cl.user_session.set(
        "message_history",
        [{"role": "system", "content": "You are a helpful assistant."}],
    )
    # NOTE: these settings are stored in the session for later use; the RAQA
    # pipeline below does not read them yet.
    settings = {
        "temperature": 0.7,  # higher values increase output diversity/randomness
        "max_tokens": 500,  # maximum length of the generated response
        "top_p": 1,  # nucleus sampling: sample only from the smallest token set whose cumulative probability reaches top_p
        "frequency_penalty": 0,  # higher values penalize tokens proportionally to how often they have already appeared
        "presence_penalty": 0,  # higher values encourage tokens that have not yet appeared at all
    }
    cl.user_session.set("settings", settings)


@cl.on_message  # called every time the user sends a message in the UI
async def main(message: cl.Message):
    # Current Chainlit versions pass a cl.Message whose text lives in .content;
    # older versions passed the raw string instead.
    qa_pipeline = RetrievalAugmentedQAPipeline(vector_db_retriever=vector_db, llm=chat_openai)
    response = qa_pipeline.run_pipeline(user_query=message.content)

    # Relay the pipeline's answer back to the chat UI.
    await cl.Message(content=response).send()
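

# Minimal command-line smoke test (a sketch; the query string is illustrative).
# This only runs under `python app.py`; `chainlit run app.py` ignores it and
# drives the handlers above instead.
if __name__ == "__main__":
    pipeline = RetrievalAugmentedQAPipeline(vector_db_retriever=vector_db, llm=chat_openai)
    print(pipeline.run_pipeline("Who are King Lear's daughters?"))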