import os

import gradio as gr
from langchain.chains import RetrievalQAWithSourcesChain
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.prompts.chat import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)
from langchain.vectorstores import FAISS

os.environ["OPENAI_API_KEY"] = "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"

# Set the data store directory
DATA_STORE_DIR = "data_store"

# Load the prebuilt FAISS index; the embedding model must match the one used to build it.
if os.path.exists(DATA_STORE_DIR):
    vector_store = FAISS.load_local(
        DATA_STORE_DIR,
        OpenAIEmbeddings()
    )
else:
    # Fail fast rather than continuing and hitting a NameError when vector_store is used below.
    raise FileNotFoundError(
        f"Missing files. Upload index.faiss and index.pkl files to the {DATA_STORE_DIR} directory first"
    )

system_template = """Use the following pieces of context to answer the user's question.
Take note of the sources and include them in the answer in the format "SOURCES: source1"; use "SOURCES" in capital letters regardless of the number of sources.
If you don't know the answer, just say "I don't know", don't try to make up an answer.
----------------
{summaries}"""

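# Build the chat prompt: the system message injects the retrieved context via
# {summaries}; the human message carries the raw user question.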
messages = [
    SystemMessagePromptTemplate.from_template(system_template),
    HumanMessagePromptTemplate.from_template("{question}")
]
prompt = ChatPromptTemplate.from_messages(messages)

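# Deterministic output (temperature=0), with answers capped at 256 tokens.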
llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0, max_tokens=256)

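# "stuff" chain type: every retrieved chunk is stuffed into a single prompt;
# return_source_documents also exposes the raw chunks in the result.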
chain_type_kwargs = {"prompt": prompt}
chain = RetrievalQAWithSourcesChain.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=vector_store.as_retriever(),
    return_source_documents=True,
    chain_type_kwargs=chain_type_kwargs
)


class Chatbot:
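    """Thin wrapper that routes a user query through the retrieval chain."""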
    def __init__(self):
        self.query = None

    def chat(self, query):
        self.query = query
        result = chain(query)
        # The chain splits the citations into a separate 'sources' key; re-attach them.
        answer = result['answer'].strip()
        sources = result.get('sources', '').strip()
        return f"{answer}\nSOURCES: {sources}" if sources else answer


chatbot = Chatbot()


# Gradio handler: gr.ChatInterface calls the function with the new message and the chat history.
def chat_interface(message, history):
    return chatbot.chat(message)


gr.ChatInterface(
    chat_interface,
    chatbot=gr.Chatbot(height=300),
    textbox=gr.Textbox(placeholder="Ask a question about the indexed documents..."),
    description="Ask questions about the indexed documents; answers include their sources",
    theme="soft",
    clear_btn="Clear",
).launch()
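# launch() serves the UI locally (default http://127.0.0.1:7860); pass share=True for a temporary public link.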