lvwerra (HF staff) committed
Commit 8bea1c6
1 Parent(s): d283111

add module default template

Files changed (5)
  1. README.md +42 -5
  2. app.py +6 -0
  3. element_count.py +95 -0
  4. requirements.txt +3 -0
  5. tests.py +17 -0
README.md CHANGED
@@ -1,12 +1,49 @@
 ---
-title: Element_count
-emoji: 💻
-colorFrom: pink
-colorTo: gray
+title: Element Count
+datasets:
+-
+tags:
+- evaluate
+- Element Count
 sdk: gradio
 sdk_version: 3.0.2
 app_file: app.py
 pinned: false
 ---
 
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
+# Measurement Card for Element Count
+
+***Module Card Instructions:*** *Fill out the following subsections. Feel free to take a look at existing measurement cards if you'd like examples.*
+
+## Measurement Description
+*Give a brief overview of this measurement, including what task(s) it is usually used for, if any.*
+
+## How to Use
+*Give a general statement of how to use the measurement.*
+
+*Provide the simplest possible example of using the measurement.*
+
+### Inputs
+*List all input arguments in the format below.*
+- **input_field** *(type): Definition of input, with explanation if necessary. State any default value(s).*
+
+### Output Values
+
+*Explain what this measurement outputs and provide an example of what the measurement output looks like. Modules should return a dictionary with one or multiple key-value pairs, e.g. {"bleu": 6.02}.*
+
+*State the range of possible values that the measurement's output can take, as well as what in that range is considered good. For example: "This measurement can take on any value between 0 and 100, inclusive. Higher scores are better."*
+
+#### Values from Popular Papers
+*Give examples, preferably with links to leaderboards or publications, of papers that have reported this measurement, along with the values they have reported.*
+
+### Examples
+*Give code examples of the measurement being used. Try to include examples that clear up any potential ambiguity left from the measurement description above. If possible, provide a range of examples that show both typical and atypical results, as well as examples where a variety of input parameters are passed.*
+
+## Limitations and Bias
+*Note any known limitations or biases that the measurement has, with links and references if possible.*
+
+## Citation
+*Cite the source where this measurement was introduced.*
+
+## Further References
+*Add any useful further references.*
app.py ADDED
@@ -0,0 +1,6 @@
+from element_count import ElementCount
+from evaluate.utils import launch_gradio_widget
+
+
+module = ElementCount()
+launch_gradio_widget(module)
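Before the widget layer, the module can also be exercised directly in a Python shell. A minimal sketch (illustrative only, not part of this commit; it assumes the working directory contains element_count.py and the requirements are installed):

from element_count import ElementCount

# Instantiate the module exactly as app.py does, then score a toy batch.
module = ElementCount()
print(module.compute(predictions=[1, 0], references=[1, 1]))
# expected output: {'accuracy': 0.5}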
element_count.py ADDED
@@ -0,0 +1,95 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""TODO: Add a description here."""
+
+import evaluate
+import datasets
+
+
+# TODO: Add BibTeX citation
+_CITATION = """\
+@InProceedings{huggingface:module,
+title = {A great new module},
+authors={huggingface, Inc.},
+year={2020}
+}
+"""
+
+# TODO: Add description of the module here
+_DESCRIPTION = """\
+This new module is designed to solve this great NLP task and is crafted with a lot of care.
+"""
+
+
+# TODO: Add description of the arguments of the module here
+_KWARGS_DESCRIPTION = """
+Calculates how good predictions are given some references, using certain scores.
+Args:
+    predictions: list of predictions to score. Each prediction
+        should be an integer, matching the int64 features below.
+    references: list of references, one for each prediction. Each
+        reference should be an integer.
+Returns:
+    accuracy: description of the first score,
+    another_score: description of the second score,
+Examples:
+    Examples should be written in doctest format, and should illustrate how
+    to use the function.
+
+    >>> my_new_module = evaluate.load("my_new_module")
+    >>> results = my_new_module.compute(references=[0, 1], predictions=[0, 1])
+    >>> print(results)
+    {'accuracy': 1.0}
+"""
+
+# TODO: Define external resources urls if needed
+BAD_WORDS_URL = "http://url/to/external/resource/bad_words.txt"
+
+
+@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
+class ElementCount(evaluate.EvaluationModule):
+    """TODO: Short description of my evaluation module."""
+
+    def _info(self):
+        # TODO: Specify the evaluate.EvaluationModuleInfo object
+        return evaluate.EvaluationModuleInfo(
+            # This is the description that will appear on the modules page.
+            type="measurement",
+            description=_DESCRIPTION,
+            citation=_CITATION,
+            inputs_description=_KWARGS_DESCRIPTION,
+            # This defines the format of each prediction and reference
+            features=datasets.Features({
+                'predictions': datasets.Value('int64'),
+                'references': datasets.Value('int64'),
+            }),
+            # Homepage of the module for documentation
+            homepage="http://module.homepage",
+            # Additional links to the codebase or references
+            codebase_urls=["http://github.com/path/to/codebase/of/new_module"],
+            reference_urls=["http://path.to.reference.url/new_module"]
+        )
+
+    def _download_and_prepare(self, dl_manager):
+        """Optional: download external resources useful to compute the scores"""
+        # TODO: Download external resources if needed
+        pass
+
+    def _compute(self, predictions, references):
+        """Returns the scores"""
+        # TODO: Compute the different scores of the module
+        accuracy = sum(i == j for i, j in zip(predictions, references)) / len(predictions)
+        return {
+            "accuracy": accuracy,
+        }
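As a sketch of how the finished module behaves once loaded (illustrative, not part of this commit; it assumes evaluate.load accepts a local script path and the incremental add_batch API that evaluate modules expose):

import evaluate

# Load the module from the local script rather than the Hub.
module = evaluate.load("./element_count.py")
# Accumulate inputs batch by batch, then score everything in one pass.
for preds, refs in [([1, 0], [1, 1]), ([1], [1])]:
    module.add_batch(predictions=preds, references=refs)
print(module.compute())  # 2 of 3 elements match -> {'accuracy': 0.666...}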
requirements.txt ADDED
@@ -0,0 +1,3 @@
+# TODO: fix github to release
+git+https://github.com/huggingface/evaluate.git@main
+datasets~=2.0
tests.py ADDED
@@ -0,0 +1,17 @@
+test_cases = [
+    {
+        "predictions": [0, 0],
+        "references": [1, 1],
+        "result": {"accuracy": 0}
+    },
+    {
+        "predictions": [1, 1],
+        "references": [1, 1],
+        "result": {"accuracy": 1}
+    },
+    {
+        "predictions": [1, 0],
+        "references": [1, 1],
+        "result": {"accuracy": 0.5}
+    }
+]
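A small runner for these cases might look like the following (a hypothetical addition, not shipped in this commit; it assumes element_count.py is importable from the same directory as tests.py):

from element_count import ElementCount

for case in test_cases:
    # Fresh instance per case to avoid any state carried over between runs.
    module = ElementCount()
    result = module.compute(
        predictions=case["predictions"], references=case["references"]
    )
    assert result == case["result"], f"{result} != {case['result']}"
print("all test cases passed")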