apahilaj committed
Commit • 29f30e1 • 1 Parent(s): 3bb6107
app.py CHANGED
@@ -87,15 +87,32 @@


 import gradio as gr
-import os
-import re
+import pandas as pd
 from langchain.embeddings import HuggingFaceEmbeddings
-from langchain.vectorstores import faiss
-from langchain_community.llms import HuggingFaceHub
-from langchain.chains import ConversationalRetrievalChain
+from langchain.vectorstores import Chroma, faiss
+from langchain_community.llms import HuggingFaceEndpoint, HuggingFaceHub
+from langchain.chains import LLMChain
+from langchain_community.document_loaders.csv_loader import CSVLoader
 from langchain_community.document_loaders import PyPDFLoader
-from langchain.text_splitter import RecursiveCharacterTextSplitter
+from langchain.text_splitter import CharacterTextSplitter
+from langchain_community.document_loaders import TextLoader
+from langchain_community import vectorstores
 from langchain.prompts import PromptTemplate
+from langchain.chains import RetrievalQA
+from langchain.memory import ConversationBufferMemory
+from langchain.chains import ConversationalRetrievalChain
+from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter
+from langchain.vectorstores import DocArrayInMemorySearch
+from langchain.document_loaders import TextLoader
+from langchain.chains import RetrievalQA, ConversationalRetrievalChain
+from langchain.memory import ConversationBufferMemory
+from langchain.chat_models import ChatOpenAI
+from langchain.document_loaders import TextLoader
+from langchain.document_loaders import PyPDFLoader
+import panel as pn
+import param
+import re
+import os

 api_token = os.environ.get('HUGGINGFACEHUB_API_TOKEN')

@@ -105,7 +122,6 @@ model = HuggingFaceHub(
     task="conversational",
     model_kwargs={"temperature": 0.8, "max_length": 1000},
 )
-
 template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. Use three sentences maximum. Keep the answer as concise as possible. Always say "thanks for asking!" at the end of the answer.
 {context}
 Question: {question}
@@ -113,13 +129,21 @@ Helpful Answer:"""
|
|
113 |
QA_CHAIN_PROMPT = PromptTemplate.from_template(template)
|
114 |
|
115 |
def load_db(file, k):
|
|
|
116 |
loader = PyPDFLoader(file)
|
117 |
documents = loader.load()
|
|
|
118 |
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=150)
|
119 |
docs = text_splitter.split_documents(documents)
|
|
|
120 |
embeddings = HuggingFaceEmbeddings()
|
121 |
-
|
|
|
|
|
122 |
retriever = db.as_retriever(search_type="similarity", search_kwargs={"k": k})
|
|
|
|
|
|
|
123 |
qa = ConversationalRetrievalChain.from_llm(
|
124 |
llm=model,
|
125 |
chain_type="stuff",
|
@@ -127,27 +151,24 @@ def load_db(file, k):
         return_source_documents=True,
         return_generated_question=True,
     )
+
     return qa

 chat_history = [] # initialize chat history

-def greet(user_input):
     global chat_history
-
-
-
-
-
-
-
-
-
-
-
-
-
-        return {"system": "", "user": user_input, "assistant": "No helpful answer found."}
-    return {"system": "", "user": "", "assistant": ""}
-
-iface = gr.Interface(fn=greet, inputs=gr.Chat(), outputs=gr.Chat())
+def chatbot_conversation(interim_responses, pdf_file):
+    question = interim_responses[-1]["message"] if interim_responses else "Hello"
+    a = load_db(pdf_file, 3)
+    r = a.invoke({"question": question, "chat_history": chat_history})
+    match = re.search(r'Helpful Answer:(.*)', r['answer'])
+    if match:
+        helpful_answer = match.group(1).strip()
+        # Extend chat history with the current question and answer
+        chat_history.extend([(question, helpful_answer)])
+        return helpful_answer, None
+    else:
+        return "No helpful answer found.", None
+
+iface = gr.ChatInterface(fn=chatbot_conversation, inputs=["text", "file"], outputs="text")
 iface.launch(share=True)
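Taken together, the new version assembles a small RAG pipeline: load a PDF, split it into chunks, embed the chunks into a FAISS index, and answer over the retrieved chunks with a ConversationalRetrievalChain. Below is a minimal runnable sketch of that pipeline with the duplicated imports collapsed; the repo_id and the retriever= argument are assumptions, since the diff hides the model definition beyond "model = HuggingFaceHub(" and one context line of the from_llm call.

import os

from langchain.chains import ConversationalRetrievalChain
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.llms import HuggingFaceHub

api_token = os.environ.get("HUGGINGFACEHUB_API_TOKEN")

# The diff hides the model definition, so this repo_id is a placeholder guess.
model = HuggingFaceHub(
    repo_id="mistralai/Mistral-7B-Instruct-v0.2",
    task="conversational",
    model_kwargs={"temperature": 0.8, "max_length": 1000},
)

def load_db(file, k):
    # Load the PDF and split it into overlapping chunks.
    documents = PyPDFLoader(file).load()
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=150)
    docs = splitter.split_documents(documents)
    # Embed the chunks and index them with FAISS for similarity search.
    db = FAISS.from_documents(docs, HuggingFaceEmbeddings())
    retriever = db.as_retriever(search_type="similarity", search_kwargs={"k": k})
    # retriever= is assumed here; the diff hides that context line.
    return ConversationalRetrievalChain.from_llm(
        llm=model,
        chain_type="stuff",
        retriever=retriever,
        return_source_documents=True,
        return_generated_question=True,
    )

One observation on the commit as written: question_generator_chain is built from QA_CHAIN_PROMPT but, since only a single hidden context line separates chain_type= from return_source_documents= in the from_llm call, it cannot be passed into the chain and appears to be dead code.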
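ConversationalRetrievalChain expects a dict carrying the new question plus the accumulated chat history, and returns a dict whose extra keys reflect the return_* flags set above. A usage sketch; the PDF path and question are placeholders:

qa = load_db("example.pdf", k=3)  # hypothetical path

history = []
result = qa.invoke({"question": "What is this document about?", "chat_history": history})

print(result["answer"])                 # the model's reply
print(result["generated_question"])     # the rewritten standalone question
print(len(result["source_documents"]))  # the k retrieved chunks

# History is a list of (question, answer) tuples, passed back in on the next turn,
# which is what chatbot_conversation maintains through its global list.
history.append(("What is this document about?", result["answer"]))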
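The "Helpful Answer:" regex in chatbot_conversation exists because the raw completion can echo the prompt template back. Note that (.*) without re.DOTALL stops at the first newline, so multi-line answers get truncated; a small sketch of the difference:

import re

raw = "Question: What is this?\nHelpful Answer: A RAG demo.\nThanks for asking!"

# Without DOTALL the capture stops at the first newline:
m = re.search(r"Helpful Answer:(.*)", raw)
print(m.group(1).strip())   # prints: A RAG demo.

# With DOTALL the capture spans the rest of the string, newlines included:
m = re.search(r"Helpful Answer:(.*)", raw, re.DOTALL)
print(m.group(1).strip())   # prints: A RAG demo. / Thanks for asking! (two lines)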
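One caveat on the last line: gr.ChatInterface does not accept inputs=/outputs= keyword arguments the way gr.Interface does. Its fn receives (message, history), and extra widgets go through additional_inputs, so the call as committed would raise a TypeError on recent Gradio versions. A hedged sketch of one way to wire the file upload in; chat_fn is a hypothetical name reusing this commit's load_db:

import gradio as gr

def chat_fn(message, history, pdf_file):
    # message: latest user text; history: prior (user, bot) pairs from Gradio;
    # pdf_file: value of the extra File input declared below.
    qa = load_db(pdf_file, 3)
    result = qa.invoke({"question": message,
                        "chat_history": [tuple(pair) for pair in history]})
    return result["answer"]

iface = gr.ChatInterface(
    fn=chat_fn,
    additional_inputs=[gr.File(label="PDF")],
)
iface.launch(share=True)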