# Hugging Face Space: CHATVLD / app.py
# (page-header metadata from the repo view — "Update app.py", commit 73633bf, verified)
# ================================
# ChatVLD Futurista - Gradio 5.44.0
# ================================
import os
from pathlib import Path
import requests
import gradio as gr
import time
from langchain_groq import ChatGroq
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.messages import AIMessage, HumanMessage
from langchain_community.document_loaders import PyMuPDFLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.chains import create_history_aware_retriever, create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
# ================================
# API KEY
# ================================
# Fail fast at startup when the Groq credential is absent from the environment.
if os.environ.get("GROQ_API_KEY") is None:
    raise ValueError("❌ A variável de ambiente GROQ_API_KEY não está definida.")
# ================================
# PDFs
# ================================
# Printer manuals hosted on Google Drive; downloaded once and cached on disk
# next to the app so later runs skip the download.
pdf_urls = {
    "Codeline SS5632": "https://drive.google.com/uc?id=1s1OPWbxxu8ADBQBjmTfPe1tj-aLcEEIH",
    "Linx 7900": "https://drive.google.com/uc?id=1GVbPq8SDriIS5CQo0kT0EZEqwWwjGJmY",
}
for pdf_name, pdf_url in pdf_urls.items():
    pdf_path = Path(f"{pdf_name}.pdf")
    if not pdf_path.exists():
        # timeout= keeps a stalled download from hanging app startup forever;
        # raise_for_status() surfaces HTTP errors instead of silently saving
        # an HTML error page with a .pdf extension.
        r = requests.get(pdf_url, timeout=60)
        r.raise_for_status()
        pdf_path.write_bytes(r.content)
# ================================
# CONFIG
# ================================
# Groq-hosted chat model id: DeepSeek R1 distilled onto Llama 70B.
ID_MODEL = "deepseek-r1-distill-llama-70b"
# LLM sampling temperature (0 = deterministic, higher = more varied answers).
TEMPERATURE = 0.7
# ================================
# FUNÇÕES
# ================================
def load_llm(model_id, temperature):
    """Build the Groq chat model for the given model id and temperature."""
    api_key = os.environ["GROQ_API_KEY"]
    return ChatGroq(model=model_id, temperature=temperature, groq_api_key=api_key)
def extract_text_pdf(file_path):
    """Load a PDF via PyMuPDF and return its full text, pages joined by newlines."""
    pages = PyMuPDFLoader(file_path).load()
    return "\n".join(page.page_content for page in pages)
def config_retriever(pdf_files, nome_impressora):
    """Return an MMR retriever over a FAISS index of one printer's manuals.

    The index is persisted under ``index_faiss_<printer>`` and reloaded on
    later runs instead of being rebuilt from the PDFs.
    """
    embeddings = HuggingFaceEmbeddings(model_name="BAAI/bge-m3")
    faiss_path = f"index_faiss_{nome_impressora.replace(' ', '_')}"
    if Path(faiss_path).exists():
        # Cached index was written by this same app, so deserializing it is safe.
        vectorstore = FAISS.load_local(faiss_path, embeddings, allow_dangerous_deserialization=True)
    else:
        # Concatenate every manual's text (newline-terminated), chunk, embed, persist.
        combined = "\n".join(extract_text_pdf(fp) for fp in pdf_files) + "\n"
        chunks = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200).split_text(combined)
        vectorstore = FAISS.from_texts(chunks, embeddings)
        vectorstore.save_local(faiss_path)
    return vectorstore.as_retriever(search_type='mmr', search_kwargs={'k': 3, 'fetch_k': 4})
def config_rag_chain(llm, retriever):
    """Assemble the RAG pipeline: contextualize question -> retrieve -> answer."""
    # Step 1: rewrite the user question into a standalone one using chat history.
    contextualize_prompt = ChatPromptTemplate.from_messages([
        ("system", "Dada a conversa e a pergunta, formule uma pergunta independente."),
        MessagesPlaceholder("chat_history"),
        ("human", "Pergunta: {input}"),
    ])
    retriever_with_history = create_history_aware_retriever(
        llm=llm, retriever=retriever, prompt=contextualize_prompt
    )
    # Step 2: answer the rewritten question from the retrieved context.
    qa_prompt = ChatPromptTemplate.from_messages([
        ("system", "Você é um assistente virtual futurista da empresa VLD. Responda de forma clara e objetiva em português. Se não souber, diga que não sabe."),
        MessagesPlaceholder("chat_history"),
        ("human", "Pergunta: {input}\n\nContexto: {context}"),
    ])
    answer_chain = create_stuff_documents_chain(llm, qa_prompt)
    return create_retrieval_chain(retriever_with_history, answer_chain)
# ================================
# GLOBAL STATE
# ================================
llm = load_llm(ID_MODEL, TEMPERATURE)
# Conversation memory shared by all handlers, seeded with the greeting.
chat_history = [AIMessage(content="🚀 Olá, sou o seu suporte virtual futurista! Como posso te ajudar?")]
# Build one retriever and one RAG chain per printer up front so that
# switching printers in the UI is instant.
retrievers_cache = {nome: config_retriever([f"{nome}.pdf"], nome) for nome in pdf_urls}
chains_cache = {nome: config_rag_chain(llm, retrievers_cache[nome]) for nome in pdf_urls}
current_chain = chains_cache["Codeline SS5632"]
def set_impressora(nome_impressora):
    """Point the module-level ``current_chain`` at the chain for the chosen printer.

    Returns a status string shown in the UI's status box.
    """
    global current_chain
    current_chain = chains_cache[nome_impressora]
    return f"📂 Impressora selecionada: {nome_impressora}"
def responder(pergunta):
    """Run one question through the active RAG chain and update chat history.

    Returns the assistant's answer with any DeepSeek ``<think>`` reasoning
    block removed, a warning when no printer chain is active, or an error
    string when the chain invocation fails.
    """
    global current_chain
    if current_chain is None:
        return "⚠️ Escolha primeiro a impressora."
    chat_history.append(HumanMessage(content=pergunta))
    try:
        result = current_chain.invoke({"input": pergunta, "chat_history": chat_history})
        # DeepSeek R1 emits its chain-of-thought before </think>: keep only the
        # text after the last marker (rpartition yields the whole string when
        # the marker is absent).
        resposta = result["answer"].rpartition("</think>")[2].strip()
    except Exception as e:
        resposta = f"❌ Erro: {str(e)}"
    chat_history.append(AIMessage(content=resposta))
    return resposta
# ================================
# CALANGO CHICO'S SPEECH BUBBLE
# ================================
def fala_calango(aba):
    """Return the mascot speech-bubble HTML for the given tab name."""
    # Both tabs share the same bubble + mascot markup; only the text differs.
    if aba == "CHATVLD":
        fala = "Pronto pra tirar suas dúvidas sobre impressoras! 🖨️"
    else:
        fala = "Aqui eu te levo pro portal futurista! 🌐"
    return f"""
    <div style="display:flex;flex-direction:column;align-items:flex-start;margin-bottom:20px;margin-left:20px;">
      <div style="display:inline-block;background:#f1f1f1;border:2px solid #555;border-radius:15px;
      padding:8px 15px;font-weight:bold;color:#333;font-size:14px;position:relative;margin-bottom:10px;">
        {fala}
        <div style="content:'';position:absolute;bottom:-15px;left:25px;width:0;height:0;
        border-left:10px solid transparent;border-right:10px solid transparent;
        border-top:15px solid #555;"></div>
        <div style="content:'';position:absolute;bottom:-13px;left:26px;width:0;height:0;
        border-left:9px solid transparent;border-right:9px solid transparent;
        border-top:13px solid #f1f1f1;"></div>
      </div>
      <img src="https://raw.githubusercontent.com/aislantavares329-eng/chatvld/main/mascote.png" width="120">
    </div>
    """
# ================================
# FUTURISTIC CSS
# ================================
# Stylesheet injected into gr.Blocks: light chatbot panel, dark input box,
# gradient buttons, and an animated gradient headline (#titulo-principal).
custom_css = """
.gradio-chatbot { background-color: #f8f9fa; color: #111; }
.gradio-textbox textarea { background-color: #1c1c1c; color: #fff; border-radius: 8px; border: 1px solid #333; padding: 8px; }
.gradio-button, .gradio-button:active { background: linear-gradient(to right, #00c6ff, #0072ff); color: #fff; border: none; border-radius: 8px; }
#titulo-principal {
text-align: center;
font-size: 40px;
font-weight: bold;
background: linear-gradient(90deg, #00c6ff, #8a2be2, #ff0080, #00c6ff);
-webkit-background-clip: text;
-webkit-text-fill-color: transparent;
animation: brilho 5s linear infinite;
margin-bottom: 20px;
}
@keyframes brilho {
0% { background-position: 0% 50%; }
50% { background-position: 100% 50%; }
100% { background-position: 0% 50%; }
}
"""
# ================================
# GRADIO INTERFACE
# ================================
with gr.Blocks(css=custom_css, theme="soft") as iface:
    gr.Markdown("🛸 SUPORTE AUTOMATIZADO", elem_id="titulo-principal")
    # Mascot speech bubble; swapped whenever the user changes tabs (see troca_aba).
    fala_box = gr.HTML(fala_calango("CHATVLD"))
    with gr.Tabs() as tabs:
        # Futuristic chat tab
        with gr.TabItem("💭 CHATVLD") as chat_tab:
            gr.Markdown("📌 **Por gentileza, escolha a impressora que deseja consultar.**")
            impressora_select = gr.Dropdown(
                choices=list(pdf_urls.keys()),
                label="Selecione a impressora",
                value="Codeline SS5632"
            )
            status_box = gr.Textbox(label="Status", interactive=False)

            def troca_impressora(nome_impressora):
                # Swap the active RAG chain and report the selection.
                # NOTE(review): both tuple items target the same status_box below,
                # so the "aguarde" message is immediately overwritten by the
                # selection message — confirm this is intended.
                return "⏳ Montando base de conhecimento, aguarde...", set_impressora(nome_impressora)

            impressora_select.change(
                fn=troca_impressora,
                inputs=impressora_select,
                outputs=[status_box, status_box]
            )
            chatbot = gr.Chatbot(type="messages")
            with gr.Row():
                txt = gr.Textbox(placeholder="Diz o que tu quer macho...", show_label=False, lines=2)
                submit_btn = gr.Button("🚀 Arrocha")
            with gr.Row():
                clear_btn = gr.Button("🧹 Barrer a prosa")
                new_chat_btn = gr.Button("✨ Nova prosa")

            def enviar(msg, history):
                # Generator handler: first echo the user's message, then show a
                # typing placeholder, then replace it with the model's answer.
                history.append({"role": "user", "content": msg})
                yield history, ""
                history.append({"role": "assistant", "content": "🤖 Digitando..."})
                yield history, ""
                resposta = responder(msg)
                history[-1] = {"role": "assistant", "content": resposta}
                yield history, ""

            def limpar():
                # Clear both the on-screen transcript and the LLM chat history.
                chat_history.clear()
                return [], ""

            def novo_chat():
                # Reset the LLM history and greet the user again on screen.
                chat_history.clear()
                chat_history.append(AIMessage(content="🤖 Novo chat iniciado. Como posso te ajudar?"))
                return [{"role": "assistant", "content": "🤖 Novo chat iniciado. Como posso te ajudar?"}], ""

            txt.submit(enviar, [txt, chatbot], [chatbot, txt])
            submit_btn.click(enviar, [txt, chatbot], [chatbot, txt])
            clear_btn.click(limpar, outputs=[chatbot, txt])
            new_chat_btn.click(novo_chat, outputs=[chatbot, txt])
        # VALID NODE tab: just a button that opens the portal in a new window.
        with gr.TabItem("🌐 VALID N.O.D.E") as node_tab:
            gr.Markdown("### Acesse o VALID NODE clicando no botão abaixo:")
            gr.HTML('<button onclick="window.open(\'https://172.17.200.97\', \'_blank\')" '
                    'style="background:linear-gradient(to right,#00c6ff,#0072ff);color:#fff;border:none;'
                    'border-radius:8px;padding:10px 20px;font-size:16px;cursor:pointer;">🖥️ VALID N.O.D.E</button>')

    # Update the Calango speech bubble to match the newly selected tab
    # (strip the emoji prefix to recover the bare tab name).
    def troca_aba(evt: gr.SelectData):
        return fala_calango(evt.value.replace("💭 ","").replace("🌐 ",""))

    tabs.select(fn=troca_aba, outputs=fala_box)

iface.launch()