Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -5,7 +5,7 @@ from transformers import pipeline
5
 
6
  # Modelos
7
  chat_model_zephyr = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
- chat_model_gemma = pipeline("text-generation", model="declare-lab/gemma-v2", max_new_tokens=256)
9
 
10
  # Similaridade
11
  similarity_model = SentenceTransformer("all-MiniLM-L6-v2")
@@ -25,8 +25,8 @@ def get_zephyr_response(question):
25
  return response.choices[0].message.content.strip()
26
 
27
 
28
- def get_gemma_response(question):
29
- generated = chat_model_gemma(question)[0]["generated_text"]
30
  return generated.strip()
31
 
32
 
@@ -39,12 +39,12 @@ def compare_answers(answer1, answer2):
39
 
40
  def respond(question):
41
  answer_zephyr = get_zephyr_response(question)
42
- answer_gemma = get_gemma_response(question)
43
- similarity = compare_answers(answer_zephyr, answer_gemma)
44
 
45
  return (
46
  f"🧠 Zephyr-7b:\n{answer_zephyr}\n\n"
47
- f"🤖 Gemma-v2:\n{answer_gemma}\n\n"
48
  f"🔍 Similaridade Semântica: **{similarity}**"
49
  )
50
 
 
5
 
6
# Models: a remote Zephyr chat backend and a local GPT-2 generation pipeline.
chat_model_zephyr = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
chat_model_gpt2 = pipeline("text-generation", model="gpt2", max_new_tokens=100)

# Sentence-embedding model used to score semantic similarity between answers.
similarity_model = SentenceTransformer("all-MiniLM-L6-v2")
 
25
  return response.choices[0].message.content.strip()
26
 
27
 
28
def get_gpt2_response(question):
    """Generate an answer to *question* using the local GPT-2 pipeline.

    The Hugging Face ``text-generation`` pipeline returns the prompt
    concatenated with the continuation by default (``return_full_text=True``),
    so the echoed question is stripped from the front of the output; otherwise
    the "answer" would repeat the question and inflate the downstream
    semantic-similarity score.

    Parameters:
        question: The user's prompt string.

    Returns:
        The model's continuation only, with surrounding whitespace removed.
    """
    generated = chat_model_gpt2(question)[0]["generated_text"]
    # Drop the echoed prompt before trimming whitespace (Python 3.9+).
    return generated.removeprefix(question).strip()
31
 
32
 
 
39
 
40
def respond(question):
    """Answer *question* with both models and report their semantic similarity.

    Queries Zephyr-7b and GPT-2, scores how close the two answers are, and
    returns a single formatted report string.
    """
    zephyr_answer = get_zephyr_response(question)
    gpt2_answer = get_gpt2_response(question)
    score = compare_answers(zephyr_answer, gpt2_answer)

    sections = [
        f"🧠 Zephyr-7b:\n{zephyr_answer}",
        f"🤖 GPT-2:\n{gpt2_answer}",
        f"🔍 Similaridade Semântica: **{score}**",
    ]
    return "\n\n".join(sections)
50