import dotenv
import gradio as gr
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import DirectoryLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain.schema import HumanMessage, SystemMessage
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
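# Load environment variables; an OPENAI_API_KEY entry is expected in a local .env file.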
dotenv.load_dotenv()
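# System prompt: the assistant serves accountants and always answers in Greek.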
system_message = """You are a helpful assistant for accountants.
Your answers should be in Greek.
If you don't know the answer, just say that you don't know; don't try to make up an answer.
"""
prompt_template = """Use the following pieces of context to answer the question at the end.
If you don't know the answer, just say that you don't know, don't try to make up an answer.
Only answer questions that are related to the context. If the answer is not in the context, say "Δεν γνωρίζω".
Context:
{context}
Question: {question}
Answer in Greek:
"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
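# Load every .txt file under ./documents.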
loader = DirectoryLoader("./documents", glob="**/*.txt", show_progress=True)
docs = loader.load()
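# Split the documents into overlapping chunks so relevant passages fit into the prompt.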
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=400)
texts = text_splitter.split_documents(docs)
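# Embed the chunks with OpenAI embeddings and index them in an in-memory Chroma store, used as a retriever.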
embeddings = OpenAIEmbeddings()
docsearch = Chroma.from_documents(texts, embeddings).as_retriever()
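# Chat model with a low temperature to keep answers close to the retrieved context.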
chat = ChatOpenAI(temperature=0.1)
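# Gradio chat UI: a chatbot pane, a message box, and a clear button.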
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    clear = gr.ClearButton([msg, chatbot])

    def respond(message, chat_history):
        messages = [
            SystemMessage(content=system_message),
        ]
        # Retrieve the chunks most relevant to the user's question.
        result_docs = docsearch.get_relevant_documents(message)
        for doc in result_docs[:3]:
            print("Result: ", doc, "\n\n")
        # Build the user message from the text of the top 3 chunks plus the question.
        context = "\n\n".join(doc.page_content for doc in result_docs[:3])
        human_message = HumanMessage(
            content=PROMPT.format(context=context, question=message)
        )
        messages.append(human_message)
        result = chat(messages)
        bot_message = result.content
        chat_history.append((message, bot_message))
        return "", chat_history

    msg.submit(respond, [msg, chatbot], [msg, chatbot])

if __name__ == "__main__":
    demo.launch()