import logging
import os
import subprocess

# Surface the logging.info() call in make_inference below (the default WARNING level would hide it).
logging.basicConfig(level=logging.INFO)
# Fetch the MIT "Complete Works of Shakespeare" corpus (skip the clone on re-runs).
if not os.path.isdir("./shakespeare"):
    subprocess.run(["git", "clone", "https://github.com/TheMITTech/shakespeare"], check=True)
from glob import glob

# The repo nests plays in subdirectories, so the ** pattern needs recursive=True to match them all.
files = glob("./shakespeare/**/*.html", recursive=True)
import shutil

# Flatten the HTML files into ./data/ so DirectoryLoader can pick them up.
destination_folder = './data/'
os.makedirs(destination_folder, exist_ok=True)
for html_file in files:
    shutil.move(html_file, os.path.join(destination_folder, os.path.basename(html_file)))
from langchain.document_loaders import BSHTMLLoader, DirectoryLoader

# Parse every HTML file in ./data/ with the BeautifulSoup-backed loader.
bshtml_dir_loader = DirectoryLoader('./data/', loader_cls=BSHTMLLoader)
data = bshtml_dir_loader.load()
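# Quick look at what was loaded (illustrative only) - each item is a langchain
# Document carrying page_content plus metadata such as the source path:
#   print(len(data), data[0].metadata)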
from transformers import AutoTokenizer
from langchain.text_splitter import CharacterTextSplitter

# Split on newlines into ~100-token chunks, measured with the Bloomz tokenizer
# so chunk sizes line up with what the model actually sees.
bloomz_tokenizer = AutoTokenizer.from_pretrained("bigscience/bloomz-1b7")
text_splitter = CharacterTextSplitter.from_huggingface_tokenizer(
    bloomz_tokenizer, chunk_size=100, chunk_overlap=0, separator="\n"
)
documents = text_splitter.split_documents(data)
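# Sanity check (illustrative only): eyeball one chunk to confirm the split size.
#   print(documents[0].page_content)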
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma

# Embed the chunks and persist the Chroma index to disk.
embeddings = HuggingFaceEmbeddings()
persist_directory = "vector_db"
vectordb = Chroma.from_documents(documents=documents, embedding=embeddings, persist_directory=persist_directory)
vectordb.persist()

# Drop the in-memory handle and reload from disk to confirm the persisted index works.
vectordb = None
vectordb = Chroma(persist_directory=persist_directory, embedding_function=embeddings)
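# Optional sanity check with a hypothetical query - the reloaded index should
# return nearby passages:
#   vectordb.similarity_search("Wherefore art thou Romeo?", k=2)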
from langchain import HuggingFacePipeline

# Run bigscience/bloomz-1b7 locally through a transformers text-generation pipeline.
llm = HuggingFacePipeline.from_model_id(
    model_id="bigscience/bloomz-1b7",
    task="text-generation",
    model_kwargs={"temperature": 0, "max_length": 500})
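# Note: for a decoder-only model like Bloomz, max_length covers the prompt plus the
# generated tokens, so a large stuffed context eats into the 500-token budget.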
doc_retriever = vectordb.as_retriever()

from langchain.chains import RetrievalQA

# "stuff" chain type: all retrieved chunks are inserted into a single prompt.
shakespeare_qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=doc_retriever)
def make_inference(query):
    logging.info(query)
    return shakespeare_qa.run(query)
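# Example call (hypothetical query):
#   make_inference("Who does Macbeth murder to take the throne?")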
if __name__ == "__main__":
    # Wrap the QA chain in a simple Gradio interface (uses the gr.Textbox component API from Gradio 3+).
    import gradio as gr

    gr.Interface(
        make_inference,
        [
            gr.Textbox(lines=2, label="Query"),
        ],
        gr.Textbox(label="Response"),
        title="🗣️QuestionMyDoc-Bloomz1b7📄",
        description="🗣️QuestionMyDoc-Bloomz1b7📄 is a tool that lets you ask questions about a document - in this case, the works of Shakespeare.",
    ).launch()