# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""TODO: Add a description here.""" | |
import datasets
import evaluate
from sklearn.metrics import roc_auc_score

_CITATION = """\
@InProceedings{huggingface:module,
title = {ROC AUC Macro},
authors={Mike Frantz},
year={2022}
}
"""

_DESCRIPTION = """\
This module makes scikit-learn's `roc_auc_score` with `average='macro'`
available as a metric on the Hugging Face Hub for use in multi-class or
multi-label classification.
"""

_KWARGS_DESCRIPTION = """
Compute the Area Under the Receiver Operating Characteristic Curve (ROC AUC) from prediction scores.
The score is calculated for each label and their unweighted mean is returned ("macro" averaging),
which does not take label imbalance into account.
(See https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_auc_score.html)

Args:
    scores: An array of shape (n_samples, n_classes) of probability estimates.
        In the multi-class case, the probabilities must sum to 1 across classes.
        In the multi-label case, each column corresponds to a binary decision
        for a particular label.
    references: Ground-truth labels for each prediction, an array of shape
        (n_samples, n_classes) with values of 0 or 1.

Returns:
    roc_auc_macro: The macro-averaged ROC AUC score.

Examples:
    >>> roc_auc_macro = evaluate.load("mgfrantz/roc_auc_macro")
    >>> results = roc_auc_macro.compute(
    ...     references=[[1, 0, 1], [0, 1, 0]],
    ...     scores=[[.8, .2, .9], [.1, .8, .6]],
    ... )
    >>> print(results)
    {'roc_auc_macro': 1.0}
"""


class roc_auc_macro(evaluate.Metric):
    """Macro-averaged ROC AUC metric backed by scikit-learn's `roc_auc_score`."""

    def _info(self):
        return evaluate.MetricInfo(
            # This is the description that will appear on the modules page.
            module_type="metric",
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            # This defines the format of each prediction and reference:
            # one row of per-class scores and one row of 0/1 labels per example.
            features=datasets.Features({
                'scores': datasets.Sequence(datasets.Value('float64')),
                'references': datasets.Sequence(datasets.Value('int64')),
            }),
            # Homepage of the module for documentation
            homepage="N/A",
            # Additional links to the codebase or references
            codebase_urls=["N/A"],
            reference_urls=["N/A"],
        )

    def _download_and_prepare(self, dl_manager):
        """Optional: download external resources useful to compute the scores."""
        # No external resources are needed for this metric.
        pass

    def _compute(self, scores, references):
        """Returns the macro-averaged ROC AUC for the accumulated scores and references."""
        return {
            "roc_auc_macro": roc_auc_score(references, scores, average='macro'),
        }
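

# A minimal local smoke test (a sketch, not part of the evaluate template).
# It assumes this file is saved as roc_auc_macro.py and executed directly,
# rather than loaded from the Hub with evaluate.load("mgfrantz/roc_auc_macro").
if __name__ == "__main__":
    metric = roc_auc_macro()
    result = metric.compute(
        references=[[1, 0, 1], [0, 1, 0]],
        scores=[[0.8, 0.2, 0.9], [0.1, 0.8, 0.6]],
    )
    print(result)  # expected output: {'roc_auc_macro': 1.0}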