|
import streamlit as st |
|
from langchain.embeddings import HuggingFaceInstructEmbeddings |
|
from langchain.vectorstores import FAISS |
|
from langchain.text_splitter import CharacterTextSplitter |
|
from langchain.document_loaders import DirectoryLoader, PyPDFLoader |
|
import os |
|
from PyPDF2 import PdfReader |
|
from transformers import pipeline |
|
from transformers import AutoModel |
|
from googletrans import Translator |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def get_pdf_text(folder_path):
    """Extract text from every PDF in *folder_path* and translate it to English.

    Scans the directory non-recursively for ``*.pdf`` files, concatenates the
    extracted text of all pages, repairs line wrapping/hyphenation, and
    machine-translates the result to English via googletrans.

    Args:
        folder_path: Directory containing the PDF files.

    Returns:
        The cleaned, English-translated text of all PDFs (empty string if no
        text could be extracted).

    Raises:
        FileNotFoundError: If *folder_path* does not exist.
    """
    translator = Translator()
    text = ""

    for filename in os.listdir(folder_path):
        filepath = os.path.join(folder_path, filename)

        if os.path.isfile(filepath) and filename.lower().endswith(".pdf"):
            pdf_reader = PdfReader(filepath)
            for page in pdf_reader.pages:
                # BUG FIX: extract_text() returns None for pages without a
                # text layer; guard so the concatenation cannot raise TypeError.
                text += page.extract_text() or ""

    # Join hard-wrapped lines and de-hyphenate words split across line breaks.
    text = text.replace("\n", " ")
    text = text.replace("- ", "")

    # Avoid a pointless (and failure-prone) remote translation call when no
    # text was extracted at all.
    if not text.strip():
        return ""

    return translator.translate(text, dest='en').text
|
|
|
|
|
def get_text_chunks(text):
    """Split *text* into overlapping chunks suitable for embedding.

    Uses a newline-separated character splitter producing 1000-character
    chunks with a 200-character overlap between consecutive chunks.

    Args:
        text: The full document text to split.

    Returns:
        A list of chunk strings.
    """
    splitter = CharacterTextSplitter(
        separator="\n",
        chunk_size=1000,
        chunk_overlap=200,
        length_function=len,
    )
    return splitter.split_text(text)
|
|
|
|
|
def create_vectorstore_and_store():
    """Build a FAISS vector store from the PDFs in ./files and persist it.

    Reads and translates all PDFs under ``./files``, splits the text into
    chunks, embeds them with the Instructor model, and saves the resulting
    FAISS index to the ``Store`` directory.

    Returns:
        None.
    """
    raw_text = get_pdf_text('./files')
    chunks = get_text_chunks(raw_text)

    # Instructor embeddings are computed locally via HuggingFace.
    embedder = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-base")
    store = FAISS.from_texts(texts=chunks, embedding=embedder)

    store.save_local("Store")
    return None
|
|
|
|
|
|
|
def get_vectorstore():
    """Load the persisted FAISS index from the ``Store`` directory.

    The same embedding model used at index-build time must be supplied so
    query vectors live in the same space as the stored vectors.

    Returns:
        The loaded FAISS vector store.
    """
    embedder = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-base")
    return FAISS.load_local("Store", embedder)
|
|
|
|
|
def get_llm_answer(user_question):
    """Answer *user_question* (German) via retrieval + extractive QA.

    Pipeline: translate the question to English (the vector store holds
    English text — see get_pdf_text), retrieve the top documents from the
    FAISS index, run a bilingual SQuAD2 QA model over the concatenated
    context, and translate the extracted answer back to German.

    Args:
        user_question: The user's question, typically in German.

    Returns:
        The answer as a German string.
    """
    translator = Translator()
    # BUG FIX: previously the translation result was computed and discarded,
    # so retrieval ran on the untranslated question against an English index.
    question_en = translator.translate(user_question, dest='en').text

    retriever = get_vectorstore().as_retriever()
    retrieved_docs = retriever.invoke(question_en)

    # BUG FIX: indexing docs [0], [1], [2] unconditionally raised IndexError
    # when fewer than three documents were retrieved; a space separator also
    # prevents words fusing at document boundaries.
    context = " ".join(doc.page_content for doc in retrieved_docs[:3])

    context = context.replace("\n", " ")
    context = context.replace("- ", "")

    # NOTE(review): the pipeline is rebuilt on every call — consider caching
    # it at module level if latency matters.
    qa_pipeline = pipeline(
        "question-answering",
        model="deutsche-telekom/bert-multi-english-german-squad2",
        tokenizer="deutsche-telekom/bert-multi-english-german-squad2",
    )

    answer = qa_pipeline(question=question_en, context=context, max_length=200)

    # BUG FIX: return the translated string (.text), not the googletrans
    # Translated object — consistent with get_pdf_text.
    return translator.translate(answer["answer"], dest='de').text
|
|
|
def main():
    """Configure the Streamlit page and display the Telegram pointer."""
    st.set_page_config(
        page_title="Chatbot",
        initial_sidebar_state="expanded",
        layout="wide",
    )
    # Informational message for users (German): the bot is reachable via
    # Telegram.
    st.text("Chatbot Rene ist über Telegram erreichbar!")
|
|
|
# Standard script entry point: run the Streamlit UI when executed directly.
if __name__ == '__main__':

    main()