PEFT
Safetensors
English
Spanish
nmarafo committed on
Commit 4a26e3e
1 Parent(s): 080ba0d

Update README.md

Files changed (1)
  1. README.md +7 -11
README.md CHANGED
@@ -55,7 +55,7 @@ tokenizer = AutoTokenizer.from_pretrained(model_id)
 
 model = AutoPeftModelForCausalLM.from_pretrained(adapter, quantization_config=bnb_config, device_map={"":0})
 
-def predict(question, best_answer, student_answer, language, temperature, top_p, top_k):
+def predict(question, best_answer, student_answer, language):
     if language == "English":
         system_message = "Analyze the question, the expected answer, and the student's response. Determine if the student's answer is conceptually correct in relation to the expected answer, regardless of the exact wording. Return True if the student's answer is correct or False otherwise. Add a brief comment explaining the rationale behind the answer being correct or incorrect."
     else:  # We assume any other option will be Spanish
@@ -64,18 +64,17 @@ def predict(question, best_answer, student_answer, language, temperature, top_p, top_k):
     prompt = f"{system_message}\n\nQuestion: {question}\nExpected Answer: {best_answer}\nStudent Answer: {student_answer}"
     prompt_template=f"<start_of_turn>user\n{prompt}<end_of_turn>\n<start_of_turn>model"
 
+    # Adjust here to include attention_mask
     encoding = tokenizer(prompt_template, return_tensors='pt', padding=True, truncation=True, max_length=256)
     input_ids = encoding['input_ids'].cuda()
     attention_mask = encoding['attention_mask'].cuda()
 
     output = model.generate(input_ids, attention_mask=attention_mask,
-                            temperature=temperature, do_sample=True, top_p=top_p,
-                            top_k=top_k, max_new_tokens=256, pad_token_id=tokenizer.eos_token_id)
+                            temperature=0.5, do_sample=True, top_p=0.49,
+                            top_k=40, max_new_tokens=256, pad_token_id=tokenizer.eos_token_id)
     response = tokenizer.decode(output[0], skip_special_tokens=True)
-    pattern = r"\[INST\].*?\[\/INST\]\s*(True|False)"
-    cleaned_response = re.sub(pattern, r"\1", response, flags=re.DOTALL)
 
-    return
+    return response
 
 import gradio as gr
 
@@ -85,14 +84,11 @@ iface = gr.Interface(
         gr.Textbox(lines=2, placeholder="Pregunta"),
         gr.Textbox(lines=2, placeholder="Mejor Respuesta"),
         gr.Textbox(lines=2, placeholder="Respuesta del Estudiante"),
-        gr.Radio(choices=["English", "Español"], label="Idioma"),
-        gr.Slider(minimum=0.1, maximum=1.0, step=0.1, value=0.5, label="Temperature"),
-        gr.Slider(minimum=0.1, maximum=1.0, step=0.01, value=0.49, label="Top P"),
-        gr.Slider(minimum=0, maximum=100, step=1, value=40, label="Top K")
+        gr.Radio(choices=["English", "Español"], label="Idioma")
     ],
     outputs=gr.Textbox(label="Respuesta del Modelo")
 )
-iface.launch(share=True,debug=True)
+iface.launch(share=True,debug=True)
 
 ```
 
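
For readers who want to try the updated snippet outside the README, here is a minimal end-to-end sketch of what the new version assumes. The base model id, the adapter repo, the bnb_config, the `fn=predict` wiring, and the Spanish system prompt all fall outside the hunks shown above, so the values below (placeholder repo ids, an assumed 4-bit NF4 BitsAndBytesConfig, and a stand-in Spanish instruction) are illustrative assumptions, not the repository's actual configuration.

```python
import torch
import gradio as gr
from transformers import AutoTokenizer, BitsAndBytesConfig
from peft import AutoPeftModelForCausalLM

# Placeholders: the actual base model and adapter repos are not named in this diff.
model_id = "path/to/base-model"   # hypothetical base model (Gemma-style chat template)
adapter = "path/to/peft-adapter"  # hypothetical fine-tuned PEFT adapter

# Assumed 4-bit quantization; bnb_config is defined outside the hunks shown above.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoPeftModelForCausalLM.from_pretrained(
    adapter, quantization_config=bnb_config, device_map={"": 0}
)

def predict(question, best_answer, student_answer, language):
    # Grading instruction; the Spanish version in the README is outside the diff context,
    # so the else-branch text below is only a stand-in.
    if language == "English":
        system_message = (
            "Analyze the question, the expected answer, and the student's response. "
            "Determine if the student's answer is conceptually correct in relation to "
            "the expected answer, regardless of the exact wording. Return True if the "
            "student's answer is correct or False otherwise. Add a brief comment "
            "explaining the rationale behind the answer being correct or incorrect."
        )
    else:  # any other option is treated as Spanish
        system_message = "Equivalent grading instructions in Spanish (actual text not shown in this diff)."

    prompt = f"{system_message}\n\nQuestion: {question}\nExpected Answer: {best_answer}\nStudent Answer: {student_answer}"
    prompt_template = f"<start_of_turn>user\n{prompt}<end_of_turn>\n<start_of_turn>model"

    # Tokenize with padding/truncation and keep the attention mask for generate().
    encoding = tokenizer(prompt_template, return_tensors="pt", padding=True, truncation=True, max_length=256)
    input_ids = encoding["input_ids"].cuda()
    attention_mask = encoding["attention_mask"].cuda()

    # Fixed sampling parameters, matching the values hard-coded by this commit.
    output = model.generate(
        input_ids,
        attention_mask=attention_mask,
        do_sample=True,
        temperature=0.5,
        top_p=0.49,
        top_k=40,
        max_new_tokens=256,
        pad_token_id=tokenizer.eos_token_id,
    )
    return tokenizer.decode(output[0], skip_special_tokens=True)

iface = gr.Interface(
    fn=predict,
    inputs=[
        gr.Textbox(lines=2, placeholder="Pregunta"),
        gr.Textbox(lines=2, placeholder="Mejor Respuesta"),
        gr.Textbox(lines=2, placeholder="Respuesta del Estudiante"),
        gr.Radio(choices=["English", "Español"], label="Idioma"),
    ],
    outputs=gr.Textbox(label="Respuesta del Modelo"),
)
iface.launch(share=True, debug=True)
```

The sketch follows the direction of the commit: sampling parameters are hard-coded (temperature=0.5, top_p=0.49, top_k=40), the three slider inputs are dropped in favor of a single language selector, the unused regex post-processing is removed, and the decoded response is returned directly.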