DarrenChensformer committed
Commit 0a5ef0f
1 Parent(s): fdba9c1

[config] Add docstring

Files changed (2):
  1. README.md +4 -2
  2. eval_keyphrase.py +4 -10
README.md CHANGED
@@ -22,8 +22,10 @@ Metric to evaluate the text2text generation task, where the format is the sequence
  ## How to Use
  *Give general statement of how to use the metric*
 
- *Provide simplest possible example for using the metric*
-
+ ```python
+ metric = evaluate.load("DarrenChensformer/eval_keyphrase")
+ results = metric.compute(references=[["Hello","World"]], predictions=[["hello","world"]])
+ ```
  ### Inputs
  *List all input arguments in the format below*
  - **input_field** *(type): Definition of input, with explanation if necessary. State any default value(s).*
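Pieced together, the new README snippet would run roughly as below. This is a sketch, not verified against the repo: the Hub id and the single example pair come from the diff, the expected output keys come from the updated docstring, and the second example pair is a hypothetical illustration.

```python
import evaluate

# Load the metric from the Hub (id taken from the diff above).
metric = evaluate.load("DarrenChensformer/eval_keyphrase")

# Each prediction/reference is a list of keyphrases. The updated doctest
# suggests matching is case-insensitive ("Hello" matches "hello").
# The second pair below is a made-up illustration, not from the repo.
predictions = [["hello", "world"], ["machine learning"]]
references = [["Hello", "World"], ["deep learning", "machine learning"]]

results = metric.compute(predictions=predictions, references=references)
print(results)
# For the single README pair, the docstring shows:
# {'precision': 1.0, 'recall': 1.0, 'f1': 1.0, 'num_pred': 2.0, 'num_gold': 2.0}
```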
eval_keyphrase.py CHANGED
@@ -49,10 +49,10 @@ Examples:
  Examples should be written in doctest format, and should illustrate how
  to use the function.
 
- >>> my_new_module = evaluate.load("my_new_module")
- >>> results = my_new_module.compute(references=[0, 1], predictions=[0, 1])
+ >>> metric = evaluate.load("DarrenChensformer/eval_keyphrase")
+ >>> results = metric.compute(references=[["Hello","World"]], predictions=[["hello","world"]])
  >>> print(results)
- {'accuracy': 1.0}
+ {'precision': 1.0, 'recall': 1.0, 'f1': 1.0, 'num_pred': 2.0, 'num_gold': 2.0}
  """
 
  # TODO: Define external resources urls if needed
@@ -128,10 +128,4 @@ class eval_keyphrase(evaluate.Metric):
  macro_metrics['num_pred'].append(len(total_preds))
  macro_metrics['num_gold'].append(len(total_tgt_set))
 
- return {
-     "precision": round(sum(macro_metrics["precision"])/len(macro_metrics["precision"]), 4),
-     "recall": round(sum(macro_metrics["recall"])/len(macro_metrics["recall"]), 4),
-     "f1": round(sum(macro_metrics["f1"])/len(macro_metrics["f1"]), 4),
-     "num_pred": round(sum(macro_metrics["num_pred"])/len(macro_metrics["num_pred"]), 4),
-     "num_gold": round(sum(macro_metrics["num_gold"])/len(macro_metrics["num_gold"]), 4),
- }
+ return { k: round(sum(v)/len(v), 4) for k, v in macro_metrics.items()}
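The substantive code change in eval_keyphrase.py is the return statement: five near-identical round(sum(...)/len(...)) expressions collapse into one dict comprehension over macro_metrics. A standalone check with made-up per-example scores confirms the two forms produce identical macro averages:

```python
# Hypothetical per-example scores, shaped like macro_metrics in the diff.
macro_metrics = {
    "precision": [1.0, 0.5],
    "recall": [1.0, 1.0],
    "f1": [1.0, 2 / 3],
    "num_pred": [2, 1],
    "num_gold": [2, 2],
}

# Original field-by-field version.
old = {
    "precision": round(sum(macro_metrics["precision"]) / len(macro_metrics["precision"]), 4),
    "recall": round(sum(macro_metrics["recall"]) / len(macro_metrics["recall"]), 4),
    "f1": round(sum(macro_metrics["f1"]) / len(macro_metrics["f1"]), 4),
    "num_pred": round(sum(macro_metrics["num_pred"]) / len(macro_metrics["num_pred"]), 4),
    "num_gold": round(sum(macro_metrics["num_gold"]) / len(macro_metrics["num_gold"]), 4),
}

# Refactored version from the commit: macro-average each list, round to 4 dp.
new = {k: round(sum(v) / len(v), 4) for k, v in macro_metrics.items()}

assert old == new
print(new)  # {'precision': 0.75, 'recall': 1.0, 'f1': 0.8333, 'num_pred': 1.5, 'num_gold': 2.0}
```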