langdonholmes committed
Commit
3082eb9
Parent(s): 6df6326
.ipynb_checkpoints/README-checkpoint.md ADDED
@@ -0,0 +1,50 @@
+---
+title: cohen_weighted_kappa
+datasets:
+-
+tags:
+- evaluate
+- metric
+description: "TODO: add a description here"
+sdk: gradio
+sdk_version: 3.19.1
+app_file: app.py
+pinned: false
+---
+
+# Metric Card for cohen_weighted_kappa
+
+***Module Card Instructions:*** *Fill out the following subsections. Feel free to take a look at existing metric cards if you'd like examples.*
+
+## Metric Description
+*Give a brief overview of this metric, including what task(s) it is usually used for, if any.*
+
+## How to Use
+*Give a general statement of how to use the metric.*
+
+*Provide the simplest possible example of using the metric.*
+
+### Inputs
+*List all input arguments in the format below.*
+- **input_field** *(type): Definition of input, with explanation if necessary. State any default value(s).*
+
+### Output Values
+
+*Explain what this metric outputs and provide an example of what the metric output looks like. Modules should return a dictionary with one or multiple key-value pairs, e.g. {"bleu": 6.02}.*
+
+*State the range of possible values that the metric's output can take, as well as what in that range is considered good. For example: "This metric can take on any value between 0 and 100, inclusive. Higher scores are better."*
+
+#### Values from Popular Papers
+*Give examples, preferably with links to leaderboards or publications, of papers that have reported this metric, along with the values they have reported.*
+
+### Examples
+*Give code examples of the metric being used. Try to include examples that clear up any potential ambiguity left from the metric description above. If possible, provide a range of examples that show both typical and atypical results, as well as examples where a variety of input parameters are passed.*
+
+## Limitations and Bias
+*Note any known limitations or biases that the metric has, with links and references if possible.*
+
+## Citation
+*Cite the source where this metric was introduced.*
+
+## Further References
+*Add any useful further references.*
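
The Examples subsection of the card above is still a placeholder. A minimal doctest-style sketch of what could go there, assuming the module is published under this repo's Hub id and that float predictions are rounded before scoring (as `_compute` does below):

```python
import evaluate

# Assumes the evaluate and scikit-learn packages are installed.
kappa = evaluate.load("langdonholmes/cohen_weighted_kappa")

# Float predictions are rounded to the nearest integer label before scoring,
# so 0.9 counts as 1 and 2.6 as 3; perfect agreement yields kappa = 1.0.
results = kappa.compute(predictions=[0.9, 2.4, 2.6], references=[1, 2, 3])
print(results)  # {'cohen_weighted_kappa': 1.0}
```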
.ipynb_checkpoints/app-checkpoint.py ADDED
@@ -0,0 +1,6 @@
+import evaluate
+from evaluate.utils import launch_gradio_widget
+
+
+module = evaluate.load("langdonholmes/cohen_weighted_kappa")
+launch_gradio_widget(module)
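
The app file only wraps the module in a Gradio widget. Under the same assumptions as above, the module can also be called headlessly; a sketch:

```python
import evaluate

module = evaluate.load("langdonholmes/cohen_weighted_kappa")

# Ratings that are each off by one step still agree strongly under quadratic
# weighting; this example scores roughly 0.71.
result = module.compute(predictions=[2, 3, 4, 5, 4], references=[1, 2, 3, 4, 5])
print(result)
```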
.ipynb_checkpoints/cohen_weighted_kappa-checkpoint.py ADDED
@@ -0,0 +1,94 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""TODO: Add a description here."""
+
+import evaluate
+import datasets
+import numpy as np
+from sklearn.metrics import cohen_kappa_score
+
+
+# TODO: Add BibTeX citation
+_CITATION = """\
+@InProceedings{huggingface:module,
+title = {A great new module},
+authors={huggingface, Inc.},
+year={2020}
+}
+"""
+
+# TODO: Add description of the module here
+_DESCRIPTION = """\
+This new module is designed to solve this great ML task and is crafted with a lot of care.
+"""
+
+
+_KWARGS_DESCRIPTION = """
+Calculates Cohen's kappa with quadratic weights, a chance-corrected measure of
+agreement between predictions and references on an ordinal scale.
+Args:
+    predictions: list of predicted scores. Float predictions are rounded
+        to the nearest integer label before agreement is computed.
+    references: list of reference scores, one for each prediction.
+Returns:
+    cohen_weighted_kappa: Cohen's kappa with quadratic weights.
+Examples:
+    >>> kappa = evaluate.load("langdonholmes/cohen_weighted_kappa")
+    >>> results = kappa.compute(references=[0, 1], predictions=[0, 1])
+    >>> print(results)
+    {'cohen_weighted_kappa': 1.0}
+"""
+
+# TODO: Define external resources urls if needed
+BAD_WORDS_URL = "http://url/to/external/resource/bad_words.txt"
+
+
+@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
+class cohen_weighted_kappa(evaluate.Metric):
+    """TODO: Short description of my evaluation module."""
+
+    def _info(self):
+        # TODO: Specifies the evaluate.EvaluationModuleInfo object
+        return evaluate.MetricInfo(
+            # This is the description that will appear on the modules page.
+            module_type="metric",
+            description=_DESCRIPTION,
+            citation=_CITATION,
+            inputs_description=_KWARGS_DESCRIPTION,
+            # This defines the format of each prediction and reference
+            features=datasets.Features({
+                'predictions': datasets.Value('float'),
+                'references': datasets.Value('float'),
+            }),
+            # Homepage of the module for documentation
+            homepage="http://module.homepage",
+            # Additional links to the codebase or references
+            codebase_urls=["http://github.com/path/to/codebase/of/new_module"],
+            reference_urls=["http://path.to.reference.url/new_module"]
+        )
+
+    def _download_and_prepare(self, dl_manager):
+        """Optional: download external resources useful to compute the scores"""
+        # TODO: Download external resources if needed
+        pass
+
+    def _compute(self, predictions, references):
+        """Returns the scores"""
+        # Round float predictions to the nearest label, then score agreement
+        # with quadratically weighted Cohen's kappa.
+        agreement = cohen_kappa_score(references, np.round(predictions), weights='quadratic')
+
+        return {
+            "cohen_weighted_kappa": agreement,
+        }
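
A sketch of why `_compute` passes `weights='quadratic'` to scikit-learn's `cohen_kappa_score`, contrasting unweighted and quadratically weighted kappa on ordinal ratings (the example data is illustrative, not from this repo):

```python
from sklearn.metrics import cohen_kappa_score

references = [1, 2, 3, 4, 5]
predictions = [2, 3, 4, 5, 4]  # every rating off by exactly one step

# Unweighted kappa counts only exact matches; with none here it falls below
# chance level, to -0.25.
print(cohen_kappa_score(references, predictions))

# Quadratic weights penalize each disagreement by its squared distance, so
# these near-misses still earn substantial credit, roughly 0.71.
print(cohen_kappa_score(references, predictions, weights='quadratic'))
```

Quadratic weighting is the usual choice when labels are ordinal scores, such as essay ratings, since distant disagreements should cost more than adjacent ones.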
.ipynb_checkpoints/requirements-checkpoint.txt ADDED
@@ -0,0 +1,2 @@
+git+https://github.com/huggingface/evaluate@main
+scikit-learn
cohen_weighted_kappa.py CHANGED
@@ -15,6 +15,8 @@
 
 import evaluate
 import datasets
+import numpy as np
+from sklearn.metrics import cohen_kappa_score
 
 
 # TODO: Add BibTeX citation
@@ -71,8 +73,8 @@ class cohen_weighted_kappa(evaluate.Metric):
             inputs_description=_KWARGS_DESCRIPTION,
             # This defines the format of each prediction and reference
             features=datasets.Features({
-                'predictions': datasets.Value('int64'),
-                'references': datasets.Value('int64'),
+                'predictions': datasets.Value('float'),
+                'references': datasets.Value('float'),
             }),
             # Homepage of the module for documentation
             homepage="http://module.homepage",
@@ -88,8 +90,10 @@ class cohen_weighted_kappa(evaluate.Metric):
 
     def _compute(self, predictions, references):
         """Returns the scores"""
-        # TODO: Compute the different scores of the module
-        accuracy = sum(i == j for i, j in zip(predictions, references)) / len(predictions)
+        # Round float predictions to the nearest label, then score agreement
+        # with quadratically weighted Cohen's kappa.
+        agreement = cohen_kappa_score(references, np.round(predictions), weights='quadratic')
+
         return {
-            "accuracy": accuracy,
+            "cohen_weighted_kappa": agreement,
         }
requirements.txt CHANGED
@@ -1 +1,2 @@
-git+https://github.com/huggingface/evaluate@main
+git+https://github.com/huggingface/evaluate@main
+scikit-learn
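
A quick import check for the two pinned requirements, assuming both install cleanly:

```python
# Both packages must be importable for app.py and the metric script to run.
import evaluate
import sklearn

print(evaluate.__version__, sklearn.__version__)
```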