ermu2001 committed
Commit afc99d0
1 Parent(s): edb9f39

Update tasks/eval/model_utils.py

Files changed (1)
  1. tasks/eval/model_utils.py +4 -3
tasks/eval/model_utils.py CHANGED
@@ -157,8 +157,7 @@ def pllava_answer(conv: Conversation, model, processor, img_list, do_sample=True
                          top_p=top_p, repetition_penalty=repetition_penalty, length_penalty=length_penalty, temperature=temperature,
                          stopping_criteria=stopping_criteria,)
     output_text = processor.batch_decode(output_token, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
-    if "###" in output_text:
-        output_text = "###".join(output_text.split('###')[:-1]) # remove the stop sign '###'
+
     if print_res: # debug usage
         print('### PROMPTING LM WITH: ', prompt)
         print('### LM OUTPUT TEXT: ', output_text)
@@ -166,7 +165,9 @@ def pllava_answer(conv: Conversation, model, processor, img_list, do_sample=True
         split_tag = "<|im_start|> assistant\n"
     else:
         split_tag = conv.roles[-1]
-    output_text = output_text.split(split_tag)[-1].rstrip(conv.sep if isinstance(conv.sep, str) else conv.sep[1]).strip()
+    output_text = output_text.split(split_tag)[-1]
+    ending = conv.sep if isinstance(conv.sep, str) else conv.sep[1]
+    output_text = output_text.removesuffix(ending)
     conv.messages[-1][1] = output_text
     return output_text, conv
 
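A likely motivation for the rstrip to removesuffix swap (the commit message does not say): str.rstrip(chars) treats its argument as a set of characters and strips any run of them from the right, so a stop string such as "</s>" can also eat legitimate trailing characters of the answer, whereas str.removesuffix (Python 3.9+) removes the exact suffix once. A minimal sketch of the difference, using a hypothetical separator value:

    # Minimal sketch (hypothetical values, Python 3.9+): rstrip vs. removesuffix
    # when trimming an exact stop string from generated text.
    sep = "</s>"                    # hypothetical conversation separator
    text = "The answer is yes</s>"

    # rstrip treats sep as a SET of characters ('<', '/', 's', '>') and strips
    # any run of them from the right, so it also eats the final 's' of "yes".
    print(text.rstrip(sep))         # -> The answer is ye

    # removesuffix removes the exact suffix once, leaving the answer intact.
    print(text.removesuffix(sep))   # -> The answer is yes

One observable behavior change: the old line also chained .strip(), so the new code no longer trims leading or trailing whitespace from the answer.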