Files changed (1)
  1. app.py +49 -44
app.py CHANGED
@@ -1,61 +1,66 @@
 import gradio as gr
-from huggingface_hub import InferenceClient
-from sentence_transformers import SentenceTransformer, util
 from transformers import pipeline


-# Models
-chat_model_zephyr = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-chat_model_gpt2 = pipeline("text-generation", model="gpt2", max_new_tokens=100)
-
-# Similarity
-similarity_model = SentenceTransformer("all-MiniLM-L6-v2")
-
-
-def get_zephyr_response(question):
-    messages = [
-        {"role": "system", "content": "You are a helpful assistant."},
-        {"role": "user", "content": question}
-    ]
-    response = chat_model_zephyr.chat_completion(
-        messages,
-        max_tokens=256,
-        temperature=0.7,
-        top_p=0.95,
-    )
-    return response.choices[0].message.content.strip()
-
-
-def get_gpt2_response(question):
-    generated = chat_model_gpt2(question)[0]["generated_text"]
-    return generated.strip()
-
-
-def compare_answers(answer1, answer2):
-    emb1 = similarity_model.encode(answer1, convert_to_tensor=True)
-    emb2 = similarity_model.encode(answer2, convert_to_tensor=True)
-    similarity = util.cos_sim(emb1, emb2).item()
-    return round(similarity, 3)
-
-
-def respond(question):
-    answer_zephyr = get_zephyr_response(question)
-    answer_gpt2 = get_gpt2_response(question)
-    similarity = compare_answers(answer_zephyr, answer_gpt2)
-
-    return (
-        f"🧠 Zephyr-7b:\n{answer_zephyr}\n\n"
-        f"🤖 GPT-2:\n{answer_gpt2}\n\n"
-        f"🔍 Semantic Similarity: **{similarity}**"
-    )
+from sentence_transformers import SentenceTransformer, util
+
+
+class ModelComparator:
+    def __init__(self):
+        # QA model (faster and lighter)
+        self.qa_pipeline = pipeline("question-answering", model="distilbert-base-uncased-distilled-squad")
+        # Simple text-generation model
+        self.text_gen_pipeline = pipeline("text-generation", model="gpt2", max_new_tokens=50)
+        # Model for embeddings and similarity
+        self.sim_model = SentenceTransformer("all-MiniLM-L6-v2")
+
+    def get_qa_answer(self, question, context=None):
+        # Extractive QA cannot answer without a context
+        # (Gradio passes an empty string when the box is left blank)
+        if not context:
+            return "No context provided for QA model."
+        try:
+            result = self.qa_pipeline(question=question, context=context)
+            return result['answer']
+        except Exception as e:
+            return f"Error in QA pipeline: {e}"
+
+    def get_text_gen_answer(self, prompt):
+        try:
+            generated = self.text_gen_pipeline(prompt)[0]['generated_text']
+            # GPT-2 echoes the prompt in its output; strip it so only the answer remains
+            answer = generated[len(prompt):].strip()
+            return answer if answer else generated.strip()
+        except Exception as e:
+            return f"Error in text generation pipeline: {e}"
+
+    def compare_answers(self, answer1, answer2):
+        emb1 = self.sim_model.encode(answer1, convert_to_tensor=True)
+        emb2 = self.sim_model.encode(answer2, convert_to_tensor=True)
+        similarity = util.cos_sim(emb1, emb2).item()
+        return round(similarity, 3)
+
+    def respond(self, question, context):
+        qa_answer = self.get_qa_answer(question, context)
+        gen_answer = self.get_text_gen_answer(question)
+        similarity = self.compare_answers(qa_answer, gen_answer)
+
+        return (f"Model QA answer:\n{qa_answer}\n\n"
+                f"Model GPT-2 generated answer:\n{gen_answer}\n\n"
+                f"Semantic similarity score: {similarity}")
+
+
+# Gradio interface
+model_comparator = ModelComparator()

 with gr.Blocks() as demo:
-    gr.Markdown("# 🤖 Answer Comparator (no context)\nType a question and see the answers from two models.")
-    question = gr.Textbox(label="Question")
-    submit = gr.Button("Compare Answers")
+    gr.Markdown("## Answer comparator for two local models (CPU)")
+    question_input = gr.Textbox(label="Question")
+    context_input = gr.Textbox(label="Context for the QA model (optional)", lines=5)
     output = gr.Textbox(label="Answers and Similarity", lines=15)
+    btn = gr.Button("Compare")

-    submit.click(respond, inputs=question, outputs=output)
+    btn.click(
+        fn=model_comparator.respond,
+        inputs=[question_input, context_input],
+        outputs=output
+    )

 if __name__ == "__main__":
     demo.launch()
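
For reviewers who want to exercise the refactored code without launching the UI, here is a minimal smoke-test sketch. It is not part of the PR: it assumes the patched file is importable as `app`, and the question/context strings are illustrative. Importing the module builds the Gradio Blocks but does not start the server, since `demo.launch()` is guarded by `__main__`.

```python
# Hypothetical smoke test (not part of this PR). Reuses the module-level
# comparator so the three models are loaded only once; all of them run
# locally on CPU, so no Inference API token is needed.
from app import model_comparator

context = "The Eiffel Tower was completed in 1889 and stands in Paris, France."
question = "When was the Eiffel Tower completed?"

# The QA model extracts a span from the context; GPT-2 free-generates from
# the question alone, so the similarity between the two answers is often low.
print(model_comparator.respond(question, context))

# The similarity helper also works on arbitrary strings
# (cosine similarity of MiniLM embeddings, roughly in [-1, 1]):
print(model_comparator.compare_answers("completed in 1889", "built in 1889"))
```

Note that with the `InferenceClient` call gone, the Space no longer needs Inference API access; it still needs `gradio`, `transformers`, and `sentence-transformers` installed, as before.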