Commit f65783a (parent: 0a0a3b8) by xu1998hz

Add some code instructions

Files changed (1): sescore.py (+19 -22)

sescore.py CHANGED
@@ -55,38 +55,35 @@ class robertaEncoder(BERTEncoder):
 
 # TODO: Add BibTeX citation
 _CITATION = """\
-@InProceedings{huggingface:module,
-title = {A great new module},
-authors={huggingface, Inc.},
-year={2020}
+@inproceedings{xu-etal-2022-not,
+  title={Not All Errors are Equal: Learning Text Generation Metrics using Stratified Error Synthesis},
+  author={Xu, Wenda and Tuan, Yi-lin and Lu, Yujie and Saxon, Michael and Li, Lei and Wang, William Yang},
+  booktitle={Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing},
+  month={dec},
+  year={2022},
+  url={https://arxiv.org/abs/2210.05035}
 }
 """
 
-# TODO: Add description of the module here
 _DESCRIPTION = """\
 SEScore is an evaluation metric that tries to compute an overall score to measure text generation quality.
 """
 
-
-# TODO: Add description of the arguments of the module here
 _KWARGS_DESCRIPTION = """
-Calculates how good are predictions given some references, using certain scores
+Calculates how good predictions are, given some references
 Args:
-    predictions: list of predictions to score. Each predictions
-        should be a string with tokens separated by spaces.
-    references: list of reference for each prediction. Each
-        reference should be a string with tokens separated by spaces.
+    predictions: list of candidate outputs
+    references: list of references
 Returns:
-    accuracy: description of the first score,
-    another_score: description of the second score,
+    {"mean_score": mean_score, "scores": scores}
+
 Examples:
-    Examples should be written in doctest format, and should illustrate how
-    to use the function.
-
-    >>> my_new_module = evaluate.load("my_new_module")
-    >>> results = my_new_module.compute(references=[0, 1], predictions=[0, 1])
-    >>> print(results)
-    {'accuracy': 1.0}
+    >>> import evaluate
+    >>> sescore = evaluate.load("xu1998hz/sescore")
+    >>> score = sescore.compute(
+    ...     references=['sescore is a simple but effective next-generation text evaluation metric'],
+    ...     predictions=['sescore is simple effective text evaluation metric for next generation']
+    ... )
 """
 
 # TODO: Define external resources urls if needed
@@ -95,7 +92,7 @@ BAD_WORDS_URL = "http://url/to/external/resource/bad_words.txt"
 
 @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
 class SEScore(evaluate.Metric):
-    """TODO: Short description of my evaluation module."""
+    """SEScore"""
 
     def _info(self):
         # TODO: Specifies the evaluate.EvaluationModuleInfo object
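
The commit fills in the class docstring but leaves _info as a TODO. As a minimal sketch (not the author's implementation), this is the usual way an evaluate.Metric wires its metadata together via evaluate.MetricInfo; the string feature schema is an assumption based on the Args section above, and the placeholder constants stand in for the module-level _DESCRIPTION, _CITATION, and _KWARGS_DESCRIPTION defined in the diff.

import datasets
import evaluate

# Placeholders standing in for the module-level constants from the diff above.
_DESCRIPTION = "SEScore is an evaluation metric that tries to compute an overall score to measure text generation quality."
_CITATION = "..."            # BibTeX entry from the diff
_KWARGS_DESCRIPTION = "..."  # Args/Returns/Examples docstring from the diff

class SEScore(evaluate.Metric):
    def _info(self):
        # Hedged sketch: expose the module-level docstrings as metric metadata.
        return evaluate.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            # Assumed schema: one plain string per prediction and per reference.
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
        )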
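
End to end, a minimal usage sketch mirroring the doctest added in this commit: it assumes the evaluate library is installed and that the module is published under the hub id "xu1998hz/sescore" used above; the mean_score/scores keys follow the Returns section of _KWARGS_DESCRIPTION.

import evaluate

# Load the metric from the Hugging Face Hub under the id used in the doctest.
sescore = evaluate.load("xu1998hz/sescore")

# One candidate scored against one reference; both sides are lists of strings.
results = sescore.compute(
    references=["sescore is a simple but effective next-generation text evaluation metric"],
    predictions=["sescore is simple effective text evaluation metric for next generation"],
)

# Per the Returns section: a corpus-level mean plus one score per sentence pair.
print(results["mean_score"])
print(results["scores"])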