"""
Python Backend API to chat with private data  

08/14/2023
D.M. Theekshana Samaradiwakara
"""

import os
import time

from dotenv import load_dotenv

from langchain.chains import RetrievalQA
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

from langchain.llms import GPT4All
from langchain.llms import HuggingFaceHub
from langchain.chat_models import ChatOpenAI
from langchain.chat_models import ChatAnyscale

# from langchain.retrievers.self_query.base import SelfQueryRetriever
# from langchain.chains.query_constructor.base import AttributeInfo

# from chromaDb import load_store
from faissDb import load_FAISS_store

from langchain.agents import ZeroShotAgent, Tool, AgentExecutor

from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain, ConversationalRetrievalChain
from conversationBufferWindowMemory import ConversationBufferWindowMemory
from langchain.memory import ReadOnlySharedMemory

load_dotenv()

# GPT4All model settings (numeric env vars are parsed as ints; 1024 is an assumed default for MODEL_N_CTX)
gpt4all_model_path = os.environ.get('GPT4ALL_MODEL_PATH')
model_n_ctx = int(os.environ.get('MODEL_N_CTX', 1024))
model_n_batch = int(os.environ.get('MODEL_N_BATCH', 8))
target_source_chunks = int(os.environ.get('TARGET_SOURCE_CHUNKS', 4))

openai_api_key = os.environ.get('OPENAI_API_KEY')
anyscale_api_key = os.environ.get('ANYSCALE_ENDPOINT_TOKEN')

# VERBOSE is interpreted as a boolean flag rather than a raw string
verbose = os.environ.get('VERBOSE', 'false').lower() in ('true', '1', 'yes')

# activate/deactivate the streaming StdOut callback for LLMs
callbacks = [StreamingStdOutCallbackHandler()]

memory = ConversationBufferWindowMemory(
            memory_key="chat_history",
            input_key="question",
            return_messages=True,
            k=3
        )

readonlymemory = ReadOnlySharedMemory(memory=memory)
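# Note: the window memory above keeps only the last k=3 exchanges. The read-only
# wrapper is what gets passed to the tool chains below, so tools can read the chat
# history without writing to it; only the agent executor writes to `memory`.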

class Singleton:
    __instance = None

    @staticmethod
    def getInstance():
        """ Static access method. """
        if Singleton.__instance is None:
            Singleton()
        return Singleton.__instance

    def __init__(self):
        """ Virtually private constructor. """
        if Singleton.__instance is not None:
            raise Exception("This class is a singleton!")
        else:
            Singleton.__instance = QAPipeline()
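# Typical access pattern (hypothetical caller; the API layer that imports this
# module is not part of this file): Singleton.getInstance() returns the shared
# QAPipeline, e.g.
#
#     pipeline = Singleton.getInstance()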

def get_local_LLAMA2():
    # Heavy dependencies are imported lazily: they are only needed when the
    # "local/LLAMA2" model is selected in set_model().
    import torch
    from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

    tokenizer = AutoTokenizer.from_pretrained(
        "NousResearch/Llama-2-13b-chat-hf",
        # use_auth_token=True,
    )

    model = AutoModelForCausalLM.from_pretrained(
        "NousResearch/Llama-2-13b-chat-hf",
        device_map='auto',
        torch_dtype=torch.float16,
        use_auth_token=True,
        # load_in_8bit=True,
        # load_in_4bit=True
    )

    pipe = pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        torch_dtype=torch.bfloat16,
        device_map="auto",
        max_new_tokens=512,
        do_sample=True,
        top_k=30,
        num_return_sequences=1,
        eos_token_id=tokenizer.eos_token_id,
    )

    from langchain import HuggingFacePipeline
    LLAMA2 = HuggingFacePipeline(pipeline=pipe, model_kwargs={'temperature': 0})
    print(f"\n\n> torch.cuda.is_available(): {torch.cuda.is_available()}")
    print("\n\n> local LLAMA2 loaded")
    return LLAMA2

class QAPipeline:

    def __init__(self):

        print("\n\n> Initializing QAPipeline:")
        self.llm_name = None
        self.llm = None

        self.dataset_name = None
        self.vectorstore = None

        self.qa_chain = None
        self.agent = None


    def run(self, query, model, dataset):

        if (self.llm_name != model) or (self.dataset_name != dataset) or (self.qa_chain is None):
            self.set_model(model)
            self.set_vectorstore(dataset)
            self.set_qa_chain()

        # Get the answer from the chain
        start = time.time()
        res = self.qa_chain(query)
        # answer, docs = res['result'],res['source_documents']
        end = time.time()

        # Print the result
        print("\n\n> Question:")
        print(query)
        print(f"\n> Answer (took {round(end - start, 2)} s.):")
        print(res)

        return res
    
    def run_agent(self, query, model, dataset):

        try:

            if (self.llm_name != model) or (self.dataset_name != dataset) or (self.agent is None):
                self.set_model(model)
                self.set_vectorstore(dataset)
                self.set_qa_chain_with_agent()

            # Get the answer from the chain
            start = time.time()
            res = self.agent(query)
            # answer, docs = res['result'],res['source_documents']
            end = time.time()

            # Print the result
            print("\n\n> Question:")
            print(query)
            print(f"\n> Answer (took {round(end - start, 2)} s.):")
            print(res)

            return res["output"]
    
        except Exception as e:
        # logger.error(f"Answer retrieval failed with {e}")
            print(f"> QAPipeline run_agent Error : {e}")#, icon=":books:")
            return 


    def set_model(self, model_type):
        if model_type != self.llm_name:
            match model_type:
                case "gpt4all":
                    # self.llm = GPT4All(model=gpt4all_model_path, n_ctx=model_n_ctx, backend='gptj', n_batch=model_n_batch, callbacks=callbacks, verbose=verbose)
                    self.llm = GPT4All(model=gpt4all_model_path, max_tokens=model_n_ctx, backend='gptj', n_batch=model_n_batch, callbacks=callbacks, verbose=verbose)
                    # self.llm = HuggingFaceHub(repo_id="nomic-ai/gpt4all-j", model_kwargs={"temperature":0.001, "max_length":1024})
                case "google/flan-t5-xxl":
                    self.llm = HuggingFaceHub(repo_id="google/flan-t5-xxl", model_kwargs={"temperature":0.001, "max_length":1024})
                case "tiiuae/falcon-7b-instruct":
                    self.llm = HuggingFaceHub(repo_id=model_type, model_kwargs={"temperature":0.001, "max_length":1024})
                case "openai":
                    self.llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
                case "Deci/DeciLM-6b-instruct":
                    self.llm = ChatOpenAI(model_name="Deci/DeciLM-6b-instruct", temperature=0)
                case "Deci/DeciLM-6b":
                    self.llm = ChatOpenAI(model_name="Deci/DeciLM-6b", temperature=0)
                case "local/LLAMA2":
                    self.llm = get_local_LLAMA2()
                case "anyscale/Llama-2-13b-chat-hf":
                    self.llm = ChatAnyscale(anyscale_api_key=anyscale_api_key,temperature=0, model_name='meta-llama/Llama-2-13b-chat-hf', streaming=False)
                case "anyscale/Llama-2-70b-chat-hf":
                    self.llm = ChatAnyscale(anyscale_api_key=anyscale_api_key,temperature=0, model_name='meta-llama/Llama-2-70b-chat-hf', streaming=False)
                case _:
                    # raise an exception if model_type is not supported
                    raise Exception(f"Model type {model_type} is not supported. Please choose a valid one.")
                 
            self.llm_name = model_type
    


    def set_vectorstore(self, dataset):
        if dataset != self.dataset_name:
            # self.vectorstore = load_store(dataset)
            # load_FAISS_store() takes no arguments, so the same FAISS index is used
            # for every dataset; dataset_name is only tracked to avoid reloading.
            self.vectorstore = load_FAISS_store()
            print("\n\n> vectorstore loaded")
            self.dataset_name = dataset
    
    def set_qa_chain(self):
    
        self.qa_chain = RetrievalQA.from_chain_type(
            llm=self.llm,
            chain_type="stuff",
            retriever=self.vectorstore.as_retriever(),
            # retriever=self.vectorstore.as_retriever(search_kwargs={"k": target_source_chunks}),
            return_source_documents=True,
        )
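        # With return_source_documents=True, the call in run() yields a dict:
        # the answer is under res["result"] and the retrieved chunks under
        # res["source_documents"] (matching the commented-out unpacking in run()).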


    def set_qa_chain_with_agent(self):

        try:

            # Define a custom prompt
            general_qa_template = (
                """[INST]<<SYS>> You are the AI of the company Boardpac, which provides services to company board members related to the banking and financial sector. You should only continue the conversation and reply to user questions such as welcomes, greetings and goodbyes.
                If you don't know the answer, say you don't know; don't try to make up answers. The answer should be as short and simple as possible. Start the answer with the code word Boardpac AI (chat): <</SYS>>
                Conversation: {chat_history}
                Question: {question} [/INST]"""
            )

            general_qa_chain_prompt = PromptTemplate(input_variables=["question", "chat_history"], template=general_qa_template)
            
            general_qa_chain = LLMChain(
                llm=self.llm, 
                prompt=general_qa_chain_prompt,
                verbose=True,
                memory=readonlymemory,  # use the read-only memory to prevent the tool from modifying the memory
            )

            general_qa_chain_tool = Tool(
                name="general qa",
                func=general_qa_chain.run,
                description='''Use this only when you need to answer questions like welcomes, greetings and goodbyes.
                Input should be a fully formed question.''',
                return_direct=True,
            )

            # Define a custom prompt
            retrieval_qa_template = (
                """[INST]<<SYS>> You are the AI of the company Boardpac, which provides services to company board members. Only answer questions related to the Banking and Financial Services Sector, such as banking & financial regulations, the legal framework, the governance framework, and compliance requirements as per Central Bank regulations.
                Please answer the question based on the chat history and the context information provided below, related to central bank acts published in various years. The published year is given as the metadata 'year' of each source document.
                The content of a bank act from a past year can be updated by a bank act from a later year. Always try to answer with the latest information and mention the year from which the information was extracted.
                If you don't know the answer, say you don't know; don't try to make up answers. The answer should be as short and simple as possible. Start the answer with the code word Boardpac AI (QA): <</SYS>>
                Conversation: {chat_history}
                Context: {context}
                Question: {question} [/INST]"""
            )

            retrieval_qa_chain_prompt = PromptTemplate(
                input_variables=["question", "context", "chat_history"], 
                template=retrieval_qa_template
            )
            
            document_combine_prompt = PromptTemplate(
                input_variables=["source","year", "page","page_content"],
                template= 
                """<doc> source: {source}, year: {year}, page: {page}, page content: {page_content} </doc>"""
            )
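            # Each retrieved Document is rendered with the prompt above, so every
            # document in the FAISS store is assumed to carry 'source', 'year' and
            # 'page' metadata (presumably populated in faissDb when the index is built).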

            bank_regulations_qa = ConversationalRetrievalChain.from_llm(
                llm=self.llm,
                chain_type="stuff",
                retriever = self.vectorstore.as_retriever(),
                # retriever = self.vectorstore.as_retriever(
                #     search_type="mmr",
                #     search_kwargs={
                #         'k': 6, 
                #         # 'lambda_mult': 0.1, 
                #         'fetch_k': 50},
                #     # search_type="similarity_score_threshold",  
                #     # search_kwargs={"score_threshold": .5}  
                # ),
                return_source_documents=True,
                return_generated_question=True,
                get_chat_history=lambda h: h,
                combine_docs_chain_kwargs={
                    "prompt": retrieval_qa_chain_prompt,
                    "document_prompt": document_combine_prompt,
                },
                verbose=True,
                memory=readonlymemory,  # use the read-only memory to prevent the tool from modifying the memory
            )

            bank_regulations_qa_tool = Tool(
                name="bank regulations",
                func=lambda question: bank_regulations_qa({"question": question}),
                description='''Use this when you need to answer questions about the Banking and Financial Services Sector, such as banking & financial regulations, the legal framework, the governance framework, and compliance requirements as per Central Bank regulations.
                Input should be a fully formed question.''',
                return_direct=True,
            )

            tools = [
                bank_regulations_qa_tool,
                general_qa_chain_tool
            ]

            prefix = """<<SYS>> You are the AI of company boardpac which provide services to company board members related to banking and financial sector. Have a conversation with the user, answering the following questions as best you can. You have access to the following tools:"""
            suffix = """Begin! "
            {agent_scratchpad} 
            <chat history>: {chat_history}
            <</SYS>>

            [INST]
            <Question>: {question}
            [/INST]"""

            agent_prompt = ZeroShotAgent.create_prompt(
                tools,
                prefix=prefix,
                suffix=suffix,
                input_variables=["question", "chat_history", "agent_scratchpad"],
            )

            llm_chain = LLMChain(llm=self.llm, prompt=agent_prompt)

            agent = ZeroShotAgent(
                llm_chain=llm_chain, 
                tools=tools, 
                verbose=True,
            )

            agent_chain = AgentExecutor.from_agent_and_tools(
                agent=agent, 
                tools=tools, 
                verbose=True,
                memory=memory,
                handle_parsing_errors=True,
            )

            self.agent = agent_chain

            print(f"\n> agent_chain created")

        except Exception as e:
            # logger.error(f"Answer retrieval failed with {e}")
            print(f"> QAPipeline set_qa_chain_with_agent Error : {e}")#, icon=":books:")
            return
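

# Minimal sketch of how this module could be exercised directly, assuming the
# required environment variables (API keys, GPT4ALL_MODEL_PATH, etc.) are set.
# The model and dataset names below are illustrative; the real API layer that
# imports this module chooses them per request.
if __name__ == "__main__":
    pipeline = Singleton.getInstance()
    answer = pipeline.run_agent(
        "Hello! What can you help me with?",
        model="openai",      # any key handled in set_model()
        dataset="default",   # hypothetical dataset name
    )
    print(answer)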