lvwerra (HF staff) committed
Commit e27790a
1 parent: c6f3d6d

Update Space (evaluate main: 828c6327)

Files changed (4)
  1. README.md +28 -10
  2. app.py +6 -0
  3. mcnemar.py +97 -0
  4. requirements.txt +4 -0
README.md CHANGED
@@ -1,12 +1,30 @@
  ---
- title: Mcnemar
- emoji: 🦀
- colorFrom: blue
- colorTo: yellow
- sdk: gradio
- sdk_version: 3.0.2
- app_file: app.py
- pinned: false
- ---
+ title: McNemar
+ emoji: 🤗
+ colorFrom: blue
+ colorTo: green
+ sdk: gradio
+ sdk_version: 3.0.2
+ app_file: app.py
+ pinned: false
+ tags:
+ - evaluate
+ - comparison
+ ---
+
+
+ # Comparison Card for McNemar
+
+ ## Comparison description
+
+ ## How to use
+
+ ## Output values
+
+ ### Values from popular papers
+
+ ## Examples
+
+ ## Limitations and bias

- Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
+ ## Citations
app.py ADDED
@@ -0,0 +1,6 @@
+ import evaluate
+ from evaluate.utils import launch_gradio_widget
+
+
+ module = evaluate.load("mcnemar", type="comparison")
+ launch_gradio_widget(module)
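
For reference (not part of this commit), the loaded module can also be exercised directly in a Python session instead of through the Gradio widget. This is a minimal sketch that assumes the pinned `evaluate` revision from requirements.txt is installed and simply follows the call shown in the module's docstring example:

```python
import evaluate

# Load the McNemar comparison module from the Hub.
mcnemar = evaluate.load("mcnemar")

# Compare two models' predictions against the same references.
results = mcnemar.compute(
    references=[1, 0, 1],
    predictions1=[1, 1, 1],
    predictions2=[1, 0, 1],
)

# Expected output per the docstring example: {'stat': 1.0, 'p': 0.31731050786291115}
print(results)
```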
mcnemar.py ADDED
@@ -0,0 +1,97 @@
+ # Copyright 2022 The HuggingFace Evaluate Authors
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """McNemar test for model comparison."""
+
+ import datasets
+ from scipy.stats import chi2
+
+ import evaluate
+
+
+ _DESCRIPTION = """
+ McNemar's test is a diagnostic test over a contingency table resulting from the predictions of two classifiers. The test compares the sensitivity and specificity of the diagnostic tests on the same group of reference labels. It can be computed with:
+ McNemar = (SE - SP)**2 / (SE + SP)
+ Where:
+ SE: Sensitivity (Test 1 positive; Test 2 negative)
+ SP: Specificity (Test 1 negative; Test 2 positive)
+ """
+
+
+ _KWARGS_DESCRIPTION = """
+ Args:
+     predictions1 (`list` of `int`): Predicted labels for model 1.
+     predictions2 (`list` of `int`): Predicted labels for model 2.
+     references (`list` of `int`): Ground truth labels.
+
+ Returns:
+     p (`float`): The p value of the McNemar test. Minimum possible value is 0. Maximum possible value is 1.0. A lower p value means a more significant difference. The test statistic is returned under the key `stat`.
+
+ Examples:
+     >>> mcnemar = evaluate.load("mcnemar")
+     >>> results = mcnemar.compute(references=[1, 0, 1], predictions1=[1, 1, 1], predictions2=[1, 0, 1])
+     >>> print(results)
+     {'stat': 1.0, 'p': 0.31731050786291115}
+ """
+
+
+ _CITATION = """
+ @article{mcnemar1947note,
+   title={Note on the sampling error of the difference between correlated proportions or percentages},
+   author={McNemar, Quinn},
+   journal={Psychometrika},
+   volume={12},
+   number={2},
+   pages={153--157},
+   year={1947},
+   publisher={Springer-Verlag}
+ }
+ """
+
+
+ @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
+ class McNemar(evaluate.EvaluationModule):
+     def _info(self):
+         return evaluate.EvaluationModuleInfo(
+             type="comparison",
+             description=_DESCRIPTION,
+             citation=_CITATION,
+             inputs_description=_KWARGS_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "predictions1": datasets.Value("int64"),
+                     "predictions2": datasets.Value("int64"),
+                     "references": datasets.Value("int64"),
+                 }
+             ),
+         )
+
+     def _compute(self, predictions1, predictions2, references):
+         # construct contingency table
+         tbl = [[0, 0], [0, 0]]
+         for gt, p1, p2 in zip(references, predictions1, predictions2):
+             if p1 == gt and p2 == gt:
+                 tbl[0][0] += 1
+             elif p1 == gt:
+                 tbl[0][1] += 1
+             elif p2 == gt:
+                 tbl[1][0] += 1
+             else:
+                 tbl[1][1] += 1
+
+         # compute statistic
+         b, c = tbl[0][1], tbl[1][0]
+         statistic = abs(b - c) ** 2 / (1.0 * (b + c))
+         df = 1
+         pvalue = chi2.sf(statistic, df)
+         return {"stat": statistic, "p": pvalue}
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ # TODO: fix github to release
+ git+https://github.com/huggingface/evaluate.git@b6e6ed7f3e6844b297bff1b43a1b4be0709b9671
+ datasets~=2.0
+ scipy