Spaces:
Runtime error
Runtime error
fill template
Browse files- README.md +10 -25
- app.py +2 -2
- element_count.py +13 -28
README.md
CHANGED
@@ -13,37 +13,22 @@ pinned: false
|
|
13 |
|
14 |
# Measurement Card for Element Count
|
15 |
|
16 |
-
***Module Card Instructions:*** *Fill out the following subsections. Feel free to take a look at existing measurement cards if you'd like examples.*
|
17 |
-
|
18 |
## Measurement Description
|
19 |
-
|
20 |
|
21 |
## How to Use
|
22 |
-
*Give general statement of how to use the measurement*
|
23 |
|
24 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
25 |
|
26 |
### Inputs
|
27 |
-
*List
|
28 |
-
- **input_field** *(type): Definition of input, with explanation if necessary. State any default value(s).*
|
29 |
|
30 |
### Output Values
|
31 |
|
32 |
-
|
33 |
-
|
34 |
-
*State the range of possible values that the measurement's output can take, as well as what in that range is considered good. For example: "This measurement can take on any value between 0 and 100, inclusive. Higher scores are better."*
|
35 |
-
|
36 |
-
#### Values from Popular Papers
|
37 |
-
*Give examples, preferably with links to leaderboards or publications, to papers that have reported this measurement, along with the values they have reported.*
|
38 |
-
|
39 |
-
### Examples
|
40 |
-
*Give code examples of the measurement being used. Try to include examples that clear up any potential ambiguity left from the measurement description above. If possible, provide a range of examples that show both typical and atypical results, as well as examples where a variety of input parameters are passed.*
|
41 |
-
|
42 |
-
## Limitations and Bias
|
43 |
-
*Note any known limitations or biases that the measurement has, with links and references if possible.*
|
44 |
-
|
45 |
-
## Citation
|
46 |
-
*Cite the source where this measurement was introduced.*
|
47 |
-
|
48 |
-
## Further References
|
49 |
-
*Add any useful further references.*
|
13 |
|
14 |
# Measurement Card for Element Count
|
15 |
|
|
|
|
|
16 |
## Measurement Description
|
17 |
+
Counts the number of elements in the dataset.
|
18 |
|
19 |
## How to Use
|
|
|
20 |
|
21 |
+
```python
|
22 |
+
import evaluate
|
23 |
+
measure = evaluate.load("lvwerra/element_count")
|
24 |
+
measure.compute(["a", "b", "c"])
|
25 |
+
>>> {"element_count": 3}
|
26 |
+
```
|
27 |
+
|
28 |
|
29 |
### Inputs
|
30 |
+
- **data** *(`List`)*: List of strings or integers.
|
|
|
31 |
|
32 |
### Output Values
|
33 |
|
34 |
+
Returns a dictionary with the element count.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
app.py
CHANGED
@@ -1,6 +1,6 @@
|
|
1 |
-
|
2 |
from evaluate.utils import launch_gradio_widget
|
3 |
|
4 |
|
5 |
-
module =
|
6 |
launch_gradio_widget(module)
|
1 |
+
import evaluate
|
2 |
from evaluate.utils import launch_gradio_widget
|
3 |
|
4 |
|
5 |
+
module = evaluate.load("lvwerra/element_count")
|
6 |
launch_gradio_widget(module)
|
element_count.py
CHANGED
@@ -28,29 +28,21 @@ year={2020}
|
|
28 |
|
29 |
# TODO: Add description of the module here
|
30 |
_DESCRIPTION = """\
|
31 |
-
|
32 |
"""
|
33 |
|
34 |
|
35 |
# TODO: Add description of the arguments of the module here
|
36 |
_KWARGS_DESCRIPTION = """
|
37 |
-
Calculates
|
38 |
Args:
|
39 |
-
|
40 |
-
should be a string with tokens separated by spaces.
|
41 |
-
references: list of reference for each prediction. Each
|
42 |
-
reference should be a string with tokens separated by spaces.
|
43 |
Returns:
|
44 |
-
|
45 |
-
another_score: description of the second score,
|
46 |
Examples:
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
>>> my_new_module = evaluate.load("my_new_module")
|
51 |
-
>>> results = my_new_module.compute(references=[0, 1], predictions=[0, 1])
|
52 |
-
>>> print(results)
|
53 |
-
{'accuracy': 1.0}
|
54 |
"""
|
55 |
|
56 |
# TODO: Define external resources urls if needed
|
@@ -70,10 +62,10 @@ class ElementCount(evaluate.EvaluationModule):
|
|
70 |
citation=_CITATION,
|
71 |
inputs_description=_KWARGS_DESCRIPTION,
|
72 |
# This defines the format of each prediction and reference
|
73 |
-
features=
|
74 |
-
'
|
75 |
-
'
|
76 |
-
|
77 |
# Homepage of the module for documentation
|
78 |
homepage="http://module.homepage",
|
79 |
# Additional links to the codebase or references
|
@@ -81,15 +73,8 @@ class ElementCount(evaluate.EvaluationModule):
|
|
81 |
reference_urls=["http://path.to.reference.url/new_module"]
|
82 |
)
|
83 |
|
84 |
-
def
|
85 |
-
"""Optional: download external resources useful to compute the scores"""
|
86 |
-
# TODO: Download external resources if needed
|
87 |
-
pass
|
88 |
-
|
89 |
-
def _compute(self, predictions, references):
|
90 |
"""Returns the scores"""
|
91 |
-
# TODO: Compute the different scores of the module
|
92 |
-
accuracy = sum(i == j for i, j in zip(predictions, references)) / len(predictions)
|
93 |
return {
|
94 |
-
"
|
95 |
}
|
28 |
|
29 |
# TODO: Add description of the module here
|
30 |
_DESCRIPTION = """\
|
31 |
+
A simple measurement that returns the number of elements in the dataset.
|
32 |
"""
|
33 |
|
34 |
|
35 |
# TODO: Add description of the arguments of the module here
|
36 |
_KWARGS_DESCRIPTION = """
|
37 |
+
Calculates the number of elements in the dataset
|
38 |
Args:
|
39 |
+
data: list of elements.
|
|
|
|
|
|
|
40 |
Returns:
|
41 |
+
element_count: number of elements in dataset,
|
|
|
42 |
Examples:
|
43 |
+
>>> measure = evaluate.load("lvwerra/element_count")
|
44 |
+
>>> measure.compute(["a", "b", "c"])
|
45 |
+
{"element_count": 3}
|
|
|
|
|
|
|
|
|
46 |
"""
|
47 |
|
48 |
# TODO: Define external resources urls if needed
|
62 |
citation=_CITATION,
|
63 |
inputs_description=_KWARGS_DESCRIPTION,
|
64 |
# This defines the format of each prediction and reference
|
65 |
+
features=[
|
66 |
+
datasets.Features({'data': datasets.Value('int64')}),
|
67 |
+
datasets.Features({'data': datasets.Value('string')})
|
68 |
+
],
|
69 |
# Homepage of the module for documentation
|
70 |
homepage="http://module.homepage",
|
71 |
# Additional links to the codebase or references
|
73 |
reference_urls=["http://path.to.reference.url/new_module"]
|
74 |
)
|
75 |
|
76 |
+
def _compute(self, data):
|
|
|
|
|
|
|
|
|
|
|
77 |
"""Returns the scores"""
|
|
|
|
|
78 |
return {
|
79 |
+
"element_count": len(data),
|
80 |
}
|