shounakpaul95 committed on
Commit
77f5222
1 Parent(s): 5f345ed

Update eval_utils.py

Browse files
Files changed (1) hide show
  1. eval_utils.py +1 -1
eval_utils.py CHANGED
@@ -256,7 +256,7 @@ def evaluate_summ(gold_data, pred_data):
256
  pred_summaries.append(pred_summary)
257
 
258
 
259
- rl_evaluator = rouge.Rouge(metrics=['rouge-n','rouge-l'], max_n=2, limit_length=False, apply_avg=True)
260
  rl_scores = rl_evaluator.get_scores(pred_summaries, gold_summaries)
261
  print("Rouge:", {k:v['f'] for k,v in rl_scores.items()}, flush=True)
262
 
 
256
  pred_summaries.append(pred_summary)
257
 
258
 
259
+ rl_evaluator = rouge.Rouge(metrics=['rouge-l'], max_n=2, limit_length=False, apply_avg=True)
260
  rl_scores = rl_evaluator.get_scores(pred_summaries, gold_summaries)
261
  print("Rouge:", {k:v['f'] for k,v in rl_scores.items()}, flush=True)
262