lvwerra committed
Commit 429e24a
1 Parent(s): cec8f8a

add metric default template

Files changed (5)
  1. README.md +17 -6
  2. app.py +6 -0
  3. requirements.txt +3 -0
  4. test.py +94 -0
  5. tests.py +17 -0
README.md CHANGED
@@ -1,12 +1,23 @@
  ---
- title: Test
- emoji: 📚
- colorFrom: red
- colorTo: gray
+ title: test
+ datasets:
+ -
+ tags:
+ - metric
  sdk: gradio
- sdk_version: 2.9.4
+ sdk_version: 2.8.13
  app_file: app.py
  pinned: false
  ---

- Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
+ # Metric card
+
+ ## Description
+
+ ## How to use
+
+ ## Examples
+
+ ## References
+
+ ## Limitations and bias
app.py ADDED
@@ -0,0 +1,6 @@
+ from test import test
+ from evaluate.utils import launch_gradio_widget
+
+
+ metric = test()
+ launch_gradio_widget(metric)
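
For orientation (not part of the commit): launch_gradio_widget wraps the metric's compute method in the Space's demo UI, so the same metric can also be exercised without Gradio. A minimal sketch, assuming test.py from this commit is importable from the working directory; the expected output follows the doctest in test.py.

```python
# Illustrative only: call the metric directly instead of through the Gradio widget.
from test import test

metric = test()
result = metric.compute(predictions=[0, 1], references=[0, 1])
print(result)  # per the template's doctest, this should print {'accuracy': 1.0}
```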
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ # TODO: fix github to release
+ git+https://github.com/huggingface/evaluate.git@metrics-template
+ datasets~=2.0
test.py ADDED
@@ -0,0 +1,94 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """TODO: Add a description here."""
+
+ import evaluate
+ import datasets
+
+
+ # TODO: Add BibTeX citation
+ _CITATION = """\
+ @InProceedings{huggingface:metric,
+ title = {A great new metric},
+ authors={huggingface, Inc.},
+ year={2020}
+ }
+ """
+
+ # TODO: Add description of the metric here
+ _DESCRIPTION = """\
+ This new metric is designed to solve this great NLP task and is crafted with a lot of care.
+ """
+
+
+ # TODO: Add description of the arguments of the metric here
+ _KWARGS_DESCRIPTION = """
+ Calculates how good are predictions given some references, using certain scores
+ Args:
+     predictions: list of predictions to score. Each predictions
+         should be a string with tokens separated by spaces.
+     references: list of reference for each prediction. Each
+         reference should be a string with tokens separated by spaces.
+ Returns:
+     accuracy: description of the first score,
+     another_score: description of the second score,
+ Examples:
+     Examples should be written in doctest format, and should illustrate how
+     to use the function.
+
+     >>> my_new_metric = evaluate.load_metric("my_new_metric")
+     >>> results = my_new_metric.compute(references=[0, 1], predictions=[0, 1])
+     >>> print(results)
+     {'accuracy': 1.0}
+ """
+
+ # TODO: Define external resources urls if needed
+ BAD_WORDS_URL = "http://url/to/external/resource/bad_words.txt"
+
+
+ @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
+ class test(evaluate.Metric):
+     """TODO: Short description of my metric."""
+
+     def _info(self):
+         # TODO: Specifies the evaluate.MetricInfo object
+         return evaluate.MetricInfo(
+             # This is the description that will appear on the metrics page.
+             description=_DESCRIPTION,
+             citation=_CITATION,
+             inputs_description=_KWARGS_DESCRIPTION,
+             # This defines the format of each prediction and reference
+             features=datasets.Features({
+                 'predictions': datasets.Value('int64'),
+                 'references': datasets.Value('int64'),
+             }),
+             # Homepage of the metric for documentation
+             homepage="http://metric.homepage",
+             # Additional links to the codebase or references
+             codebase_urls=["http://github.com/path/to/codebase/of/new_metric"],
+             reference_urls=["http://path.to.reference.url/new_metric"]
+         )
+
+     def _download_and_prepare(self, dl_manager):
+         """Optional: download external resources useful to compute the scores"""
+         # TODO: Download external resources if needed
+         pass
+
+     def _compute(self, predictions, references):
+         """Returns the scores"""
+         # TODO: Compute the different scores of the metric
+         accuracy = sum(i == j for i, j in zip(predictions, references)) / len(predictions)
+         return {
+             "accuracy": accuracy,
+         }
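
Not part of the commit, but for context: besides calling compute with full lists, evaluate modules can also accumulate inputs batch by batch. A minimal sketch, assuming the released evaluate.load API applies to this pre-release branch and that the script is loaded by a local path (the path is illustrative):

```python
import evaluate

# Hypothetical local path to the script added in this commit.
metric = evaluate.load("./test.py")

# Accumulate predictions incrementally, then compute once at the end.
metric.add_batch(predictions=[1, 0], references=[1, 1])
metric.add_batch(predictions=[1], references=[0])
print(metric.compute())  # 2 of 3 predictions match the references -> accuracy ~ 0.667
```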
tests.py ADDED
@@ -0,0 +1,17 @@
+ test_cases = [
+     {
+         "predictions": [0, 0],
+         "references": [1, 1],
+         "result": {"metric_score": 0}
+     },
+     {
+         "predictions": [1, 1],
+         "references": [1, 1],
+         "result": {"metric_score": 1}
+     },
+     {
+         "predictions": [1, 0],
+         "references": [1, 1],
+         "result": {"metric_score": 0.5}
+     }
+ ]
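
tests.py only defines fixtures; no runner is included in this commit, and the placeholder "metric_score" key does not match the "accuracy" key that test._compute returns. A hedged pytest-style sketch of how the fixtures could be wired up, assuming each fixture's single value is meant to be compared against that accuracy score:

```python
# Illustrative test runner, not part of the commit.
import pytest

from test import test
from tests import test_cases


@pytest.mark.parametrize("case", test_cases)
def test_metric_matches_fixture(case):
    metric = test()
    output = metric.compute(
        predictions=case["predictions"], references=case["references"]
    )
    # The fixtures use a generic "metric_score" key; compare its value
    # against the "accuracy" score the template metric actually returns.
    (expected,) = case["result"].values()
    assert output["accuracy"] == pytest.approx(expected)
```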