# Hugging Face Space: cascaded chatbot demo (two generators + semantic-similarity judge)
import gradio as gr
from transformers import pipeline
from sentence_transformers import SentenceTransformer, util

# Models A and B: two instruction-tuned text2text generators that will each
# answer the same prompt; the judge below picks the better response.
generator_a = pipeline("text2text-generation", model="google/flan-t5-base", tokenizer="google/flan-t5-base")
generator_b = pipeline("text2text-generation", model="declare-lab/flan-alpaca-base", tokenizer="declare-lab/flan-alpaca-base")

# Judge based on semantic similarity: embeds prompt and answers, then
# compares cosine similarity (see judge() below).
similarity_model = SentenceTransformer("sentence-transformers/paraphrase-MiniLM-L6-v2")
def judge(prompt: str, response_a: str, response_b: str) -> str:
    """Return whichever response is semantically closer to the prompt.

    Embeds the prompt and both candidate responses with the module-level
    ``similarity_model`` and compares cosine similarity. Ties go to
    ``response_a`` (the ``>=`` keeps model A as the default winner).

    NOTE(review): prompt-to-answer similarity rewards echoing the question,
    not answer quality — acceptable for a demo, but worth confirming.
    """
    # Encode all three texts in a single batched call (one forward pass
    # instead of three); rows come back in input order.
    emb_prompt, emb_a, emb_b = similarity_model.encode(
        [prompt, response_a, response_b], convert_to_tensor=True
    )
    # util.cos_sim is the current name; pytorch_cos_sim is its legacy alias.
    score_a = util.cos_sim(emb_prompt, emb_a).item()
    score_b = util.cos_sim(emb_prompt, emb_b).item()
    return response_a if score_a >= score_b else response_b
def chat(prompt: str) -> str:
    """Answer *prompt* with both generators and report the judged winner.

    Runs model A and model B on the same prompt, lets ``judge`` pick the
    response more similar to the prompt, and returns a single formatted
    string showing both answers plus the chosen one (labels are in
    Portuguese, matching the UI).
    """
    # max_length=100 caps total sequence length; both models get the
    # same budget so the comparison is fair.
    response_a = generator_a(prompt, max_length=100)[0]['generated_text']
    response_b = generator_b(prompt, max_length=100)[0]['generated_text']
    best = judge(prompt, response_a, response_b)
    return f"Resposta A:\n{response_a}\n\nResposta B:\n{response_b}\n\n✅ Melhor Resposta:\n{best}"
# Gradio UI: single textbox in, single textbox out. Kept at module level so
# Hugging Face Spaces picks it up and serves it on startup.
iface = gr.Interface(
    fn=chat,
    inputs=gr.Textbox(label="Pergunta"),
    outputs=gr.Textbox(label="Resposta Escolhida"),
    title="Chatbot em Cascata",
)
iface.launch()