import gradio as gr
import os
from pathlib import Path
import re
from unidecode import unidecode
import chromadb
from langchain_community.vectorstores import FAISS, ScaNN, Milvus, Chroma
from langchain_community.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains import ConversationalRetrievalChain, ConversationChain
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain.memory import ConversationBufferMemory
from langchain_community.llms import HuggingFaceEndpoint
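# The HF API token is read from the "hf" environment variable (e.g. a Space secret).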
api_token = os.getenv("hf")
list_llm = ["Fecalisboa/Lu_model", "mistralai/Mistral-7B-Instruct-v0.3", "unsloth/mistral-7b-v0.3"]
list_llm_simple = [os.path.basename(llm) for llm in list_llm]
# Load PDF document and create doc splits
def load_doc(list_file_path, chunk_size, chunk_overlap):
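    """Load PDF files with PyPDFLoader and split the pages into overlapping text chunks."""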
loaders = [PyPDFLoader(x) for x in list_file_path]
pages = []
for loader in loaders:
pages.extend(loader.load())
text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
doc_splits = text_splitter.split_documents(pages)
return doc_splits
# Create vector database
def create_db(splits, collection_name, db_type):
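    """Embed the document chunks (default HuggingFaceEmbeddings model) and index them
    in the selected vector store: Chroma (in-memory), FAISS, ScaNN, or Milvus."""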
embedding = HuggingFaceEmbeddings()
if db_type == "ChromaDB":
new_client = chromadb.EphemeralClient()
vectordb = Chroma.from_documents(
documents=splits,
embedding=embedding,
client=new_client,
collection_name=collection_name,
)
elif db_type == "FAISS":
vectordb = FAISS.from_documents(
documents=splits,
embedding=embedding
)
elif db_type == "ScaNN":
vectordb = ScaNN.from_documents(
documents=splits,
embedding=embedding
)
elif db_type == "Milvus":
vectordb = Milvus.from_documents(
documents=splits,
embedding=embedding,
collection_name=collection_name,
)
else:
raise ValueError(f"Unsupported vector database type: {db_type}")
return vectordb
# Initialize langchain LLM chain
def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, initial_prompt, progress=gr.Progress()):
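    """Build a ConversationalRetrievalChain around a Hugging Face Inference endpoint,
    with buffer memory for chat history and the vector database as retriever."""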
    progress(0.1, desc="Initializing HF endpoint...")
    progress(0.5, desc="Connecting to HF Hub...")
llm = HuggingFaceEndpoint(
repo_id=llm_model,
huggingfacehub_api_token=api_token,
temperature=temperature,
max_new_tokens=max_tokens,
top_k=top_k,
)
progress(0.75, desc="Defining buffer memory...")
memory = ConversationBufferMemory(
memory_key="chat_history",
output_key='answer',
return_messages=True
)
retriever = vector_db.as_retriever()
progress(0.8, desc="Defining retrieval chain...")
qa_chain = ConversationalRetrievalChain.from_llm(
llm,
retriever=retriever,
chain_type="stuff",
memory=memory,
return_source_documents=True,
verbose=False,
)
qa_chain({"question": initial_prompt}) # Initialize with the initial prompt
progress(0.9, desc="Done!")
return qa_chain
# Generate collection name for vector database
def create_collection_name(filepath):
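    """Derive a Chroma-safe collection name from the file name: ASCII only,
    3-50 characters, starting and ending with an alphanumeric character."""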
collection_name = Path(filepath).stem
collection_name = collection_name.replace(" ", "-")
collection_name = unidecode(collection_name)
collection_name = re.sub('[^A-Za-z0-9]+', '-', collection_name)
collection_name = collection_name[:50]
if len(collection_name) < 3:
collection_name = collection_name + 'xyz'
if not collection_name[0].isalnum():
collection_name = 'A' + collection_name[1:]
if not collection_name[-1].isalnum():
collection_name = collection_name[:-1] + 'Z'
print('Filepath: ', filepath)
print('Collection name: ', collection_name)
return collection_name
# Initialize database
def initialize_database(list_file_obj, chunk_size, chunk_overlap, db_type, progress=gr.Progress()):
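    """Full ingestion pipeline: name the collection, chunk the PDFs, build the vector database."""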
list_file_path = [x.name for x in list_file_obj if x is not None]
progress(0.1, desc="Creating collection name...")
collection_name = create_collection_name(list_file_path[0])
progress(0.25, desc="Loading document...")
doc_splits = load_doc(list_file_path, chunk_size, chunk_overlap)
progress(0.5, desc="Generating vector database...")
vector_db = create_db(doc_splits, collection_name, db_type)
progress(0.9, desc="Done!")
return vector_db, collection_name, "Complete!"
def initialize_LLM(llm_option, llm_temperature, max_tokens, top_k, vector_db, initial_prompt, progress=gr.Progress()):
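    """Resolve the selected model name and build the retrieval QA chain."""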
llm_name = list_llm[llm_option]
print("llm_name: ", llm_name)
qa_chain = initialize_llmchain(llm_name, llm_temperature, max_tokens, top_k, vector_db, initial_prompt, progress)
return qa_chain, "Complete!"
def format_chat_history(chat_history):
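    """Flatten Gradio's (user, bot) history tuples into "User:"/"Assistant:" lines."""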
formatted_chat_history = []
for user_message, bot_message in chat_history:
formatted_chat_history.append(f"User: {user_message}")
formatted_chat_history.append(f"Assistant: {bot_message}")
return formatted_chat_history
def conversation(qa_chain, message, history):
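    """Run one QA turn: query the chain, strip any "Helpful Answer:" prefix, and return
    the updated history plus the top three source passages with their page numbers.
    Note that the chain's own memory supplies chat_history, overriding the formatted
    history passed in the inputs."""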
    formatted_chat_history = format_chat_history(history)
response = qa_chain({"question": message, "chat_history": formatted_chat_history})
response_answer = response["answer"]
if "Helpful Answer:" in response_answer:
response_answer = response_answer.split("Helpful Answer:")[-1]
response_sources = response["source_documents"]
response_source1 = response_sources[0].page_content.strip()
response_source2 = response_sources[1].page_content.strip()
response_source3 = response_sources[2].page_content.strip()
response_source1_page = response_sources[0].metadata["page"] + 1
response_source2_page = response_sources[1].metadata["page"] + 1
response_source3_page = response_sources[2].metadata["page"] + 1
new_history = history + [(message, response_answer)]
return qa_chain, gr.update(value=""), new_history, response_source1, response_source1_page, response_source2, response_source2_page, response_source3, response_source3_page
def initialize_llm_no_doc(llm_model, temperature, max_tokens, top_k, initial_prompt, progress=gr.Progress()):
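    """Build a plain ConversationChain (no retrieval) around an HF Inference endpoint."""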
    progress(0.1, desc="Initializing HF endpoint...")
    progress(0.5, desc="Connecting to HF Hub...")
llm = HuggingFaceEndpoint(
repo_id=llm_model,
huggingfacehub_api_token=api_token,
temperature=temperature,
max_new_tokens=max_tokens,
top_k=top_k,
)
progress(0.75, desc="Defining buffer memory...")
    # ConversationChain's default prompt expects the memory under "history" and the
    # user turn under "input"; mismatched keys fail the chain's validation.
    memory = ConversationBufferMemory(memory_key="history")
    conversation_chain = ConversationChain(llm=llm, memory=memory, verbose=False)
    conversation_chain({"input": initial_prompt})  # prime the chain with the initial prompt
progress(0.9, desc="Done!")
return conversation_chain
def conversation_no_doc(llm, message, history):
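    """Run one chat turn against the document-free ConversationChain."""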
    # ConversationChain keeps its own history in memory; its input key is "input"
    # and its output key is "response".
    response = llm({"input": message})
    response_answer = response["response"]
new_history = history + [(message, response_answer)]
return llm, gr.update(value=""), new_history
def upload_file(file_obj):
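    """Collect the local paths of the uploaded files (currently unused helper)."""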
list_file_path = []
for file in file_obj:
list_file_path.append(file.name)
return list_file_path
def demo():
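    """Assemble the Gradio Blocks UI: upload, ingestion, prompt, chain setup, and two chatbots."""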
with gr.Blocks(theme="base") as demo:
vector_db = gr.State()
qa_chain = gr.State()
collection_name = gr.State()
initial_prompt = gr.State("")
llm_no_doc = gr.State()
gr.Markdown(
"""<center><h2>lucIAna</center></h2>
<h3>Olá, sou a 2. versão</h3>""")
gr.Markdown(
"""<b>Note:</b> Esta é a lucIAna, primeira Versão da IA para seus PDF documentos.
Este chatbot leva em consideração perguntas anteriores ao gerar respostas (por meio de memória conversacional) e inclui referências a documentos para fins de clareza.
""")
with gr.Tab("Step 1 - Upload PDF"):
with gr.Row():
document = gr.Files(height=100, file_count="multiple", file_types=["pdf"], interactive=True, label="Upload your PDF documents (single or multiple)")
with gr.Tab("Step 2 - Process document"):
with gr.Row():
db_type_radio = gr.Radio(["ChromaDB", "FAISS", "ScaNN", "Milvus"], label="Vector database type", value="ChromaDB", type="value", info="Choose your vector database")
with gr.Accordion("Advanced options - Document text splitter", open=False):
with gr.Row():
slider_chunk_size = gr.Slider(minimum=100, maximum=1000, value=600, step=20, label="Chunk size", info="Chunk size", interactive=True)
with gr.Row():
slider_chunk_overlap = gr.Slider(minimum=10, maximum=200, value=40, step=10, label="Chunk overlap", info="Chunk overlap", interactive=True)
with gr.Row():
db_progress = gr.Textbox(label="Vector database initialization", value="None")
with gr.Row():
db_btn = gr.Button("Generate vector database")
with gr.Tab("Step 3 - Set Initial Prompt"):
with gr.Row():
                prompt_input = gr.Textbox(label="Initial Prompt", lines=5, value="You are a senior lawyer; your role is to analyze and present information without inventing anything, always giving your best opinion with context and references. Learn what case law (jurisprudence) is.")
with gr.Row():
set_prompt_btn = gr.Button("Set Prompt")
with gr.Tab("Step 4 - Initialize QA chain"):
with gr.Row():
llm_btn = gr.Radio(list_llm_simple,
label="LLM models", value=list_llm_simple[0], type="index", info="Choose your LLM model")
with gr.Accordion("Advanced options - LLM model", open=False):
with gr.Row():
slider_temperature = gr.Slider(minimum=0.01, maximum=1.0, value=0.7, step=0.1, label="Temperature", info="Model temperature", interactive=True)
with gr.Row():
slider_maxtokens = gr.Slider(minimum=224, maximum=4096, value=1024, step=32, label="Max Tokens", info="Model max tokens", interactive=True)
with gr.Row():
slider_topk = gr.Slider(minimum=1, maximum=10, value=3, step=1, label="top-k samples", info="Model top-k samples", interactive=True)
with gr.Row():
llm_progress = gr.Textbox(value="None", label="QA chain initialization")
with gr.Row():
qachain_btn = gr.Button("Initialize Question Answering chain")
with gr.Tab("Step 5 - Chatbot with document"):
chatbot = gr.Chatbot(height=300)
with gr.Accordion("Advanced - Document references", open=False):
with gr.Row():
doc_source1 = gr.Textbox(label="Reference 1", lines=2, container=True, scale=20)
source1_page = gr.Number(label="Page", scale=1)
with gr.Row():
doc_source2 = gr.Textbox(label="Reference 2", lines=2, container=True, scale=20)
source2_page = gr.Number(label="Page", scale=1)
with gr.Row():
doc_source3 = gr.Textbox(label="Reference 3", lines=2, container=True, scale=20)
source3_page = gr.Number(label="Page", scale=1)
with gr.Row():
msg = gr.Textbox(placeholder="Type message (e.g. 'What is this document about?')", container=True)
with gr.Row():
submit_btn = gr.Button("Submit message")
clear_btn = gr.ClearButton([msg, chatbot], value="Clear conversation")
with gr.Tab("Step 6 - Chatbot without document"):
chatbot_no_doc = gr.Chatbot(height=300)
with gr.Row():
msg_no_doc = gr.Textbox(placeholder="Type message to chat with lucIAna", container=True)
with gr.Row():
submit_btn_no_doc = gr.Button("Submit message")
clear_btn_no_doc = gr.ClearButton([msg_no_doc, chatbot_no_doc], value="Clear conversation")
# Preprocessing events
db_btn.click(initialize_database,
inputs=[document, slider_chunk_size, slider_chunk_overlap, db_type_radio],
outputs=[vector_db, collection_name, db_progress])
set_prompt_btn.click(lambda prompt: prompt,
inputs=prompt_input,
outputs=initial_prompt)
qachain_btn.click(initialize_LLM,
inputs=[llm_btn, slider_temperature, slider_maxtokens, slider_topk, vector_db, initial_prompt],
outputs=[qa_chain, llm_progress]).then(lambda:[None,"",0,"",0,"",0],
inputs=None,
outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
queue=False)
# Chatbot events with document
msg.submit(conversation,
inputs=[qa_chain, msg, chatbot],
outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
queue=False)
submit_btn.click(conversation,
inputs=[qa_chain, msg, chatbot],
outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
queue=False)
clear_btn.click(lambda:[None,"",0,"",0,"",0],
inputs=None,
outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
queue=False)
# Chatbot events without document
submit_btn_no_doc.click(conversation_no_doc,
inputs=[llm_no_doc, msg_no_doc, chatbot_no_doc],
outputs=[llm_no_doc, msg_no_doc, chatbot_no_doc],
queue=False)
clear_btn_no_doc.click(lambda:[None,""],
inputs=None,
outputs=[chatbot_no_doc, msg_no_doc],
queue=False)
# Initialize LLM without document for conversation
with gr.Tab("Initialize LLM for Chatbot without document"):
with gr.Row():
llm_no_doc_btn = gr.Radio(list_llm_simple,
label="LLM models", value=list_llm_simple[0], type="index", info="Choose your LLM model for chatbot without document")
with gr.Accordion("Advanced options - LLM model", open=False):
with gr.Row():
slider_temperature_no_doc = gr.Slider(minimum=0.01, maximum=1.0, value=0.7, step=0.1, label="Temperature", info="Model temperature", interactive=True)
with gr.Row():
slider_maxtokens_no_doc = gr.Slider(minimum=224, maximum=4096, value=1024, step=32, label="Max Tokens", info="Model max tokens", interactive=True)
with gr.Row():
slider_topk_no_doc = gr.Slider(minimum=1, maximum=10, value=3, step=1, label="top-k samples", info="Model top-k samples", interactive=True)
with gr.Row():
llm_no_doc_progress = gr.Textbox(value="None", label="LLM initialization for chatbot without document")
with gr.Row():
llm_no_doc_init_btn = gr.Button("Initialize LLM for Chatbot without document")
llm_no_doc_init_btn.click(initialize_llm_no_doc,
inputs=[llm_no_doc_btn, slider_temperature_no_doc, slider_maxtokens_no_doc, slider_topk_no_doc, initial_prompt],
outputs=[llm_no_doc, llm_no_doc_progress])
demo.queue().launch(debug=True, share=True)
if __name__ == "__main__":
demo()