# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""M2 (MaxMatch) scorer: precision, recall, and F0.5 for grammatical error correction."""

import evaluate
from datasets import Features, Sequence, Value

from m2scorer import get_m2score, get_m2score_from_raw, load_m2


_CITATION = """\
@inproceedings{dahlmeier-ng-2012-better,
    title = "Better Evaluation for Grammatical Error Correction",
    author = "Dahlmeier, Daniel and Ng, Hwee Tou",
    booktitle = "Proceedings of the 2012 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
    year = "2012",
    pages = "568--572",
}
"""

_DESCRIPTION = """\
The M2 (MaxMatch) scorer evaluates grammatical error correction systems.
It compares the edits implied by a system hypothesis against gold-standard
annotator edits and reports precision, recall, and F0.5. Gold annotations
from multiple annotators are supported via per-edit annotator ids.
"""

_KWARGS_DESCRIPTION = """
Computes the M2 (MaxMatch) score of corrected sentences against gold-standard edits.
Args:
    predictions: list of corrected sentences to score. Each prediction should be
        a string with tokens separated by spaces.
    references: list of references, one per prediction. Each reference is a dict
        with a 'source_sentence' (a string with tokens separated by spaces) and
        'edits', a list of gold edits. Each edit is a dict with token offsets
        'from' and 'to', the replacement 'text' (a list of strings), and the
        annotator id 'aid'.
Returns:
    f0.5: F0.5 measure of the hypothesis edits against the gold edits,
    precision: precision of the hypothesis edits,
    recall: recall of the hypothesis edits,
Examples:
    >>> m2 = evaluate.load("m2")  # adjust to the path/name under which this script is stored
    >>> predictions = ['This is a sentence .']
    >>> references = [{
    ...     'source_sentence': 'This are a sentence .',
    ...     'edits': [{'from': 1, 'to': 2, 'text': ['is'], 'aid': 0}],
    ... }]
    >>> results = m2.compute(predictions=predictions, references=references)
    >>> sorted(results)
    ['f0.5', 'precision', 'recall']
"""


@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class M2(evaluate.Metric):
    """M2 (MaxMatch) scorer for grammatical error correction."""

    def _info(self):
        return evaluate.MetricInfo(
            # This is the description that will appear on the modules page.
            module_type="metric",
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            # This defines the format of each prediction and reference.
            features=Features({
                'predictions': Value(dtype='string'),
                'references': {
                    'source_sentence': Value(dtype='string'),
                    'edits': Sequence({
                        'from': Value(dtype='int32'),
                        'to': Value(dtype='int32'),
                        'text': [Value(dtype='string')],
                        'aid': Value(dtype='int32'),
                    }),
                },
            }),
            # Homepage of the module for documentation
            homepage="http://module.homepage",
            # Additional links to the codebase or references
            codebase_urls=["http://github.com/path/to/codebase/of/new_module"],
            reference_urls=["http://path.to.reference.url/new_module"],
        )

    def _download_and_prepare(self, dl_manager):
        """Optional: download external resources useful to compute the scores."""
        pass

    def _compute(self, predictions, references):
        """Returns the M2 precision, recall, and F0.5 of the predictions."""
        gold_data = self._features_to_gold_data(references)
        p, r, f = get_m2score(predictions, gold_data, tokenize=False, keep_gold=True)
        return {
            "f0.5": f,
            "precision": p,
            "recall": r,
        }

    def _features_to_gold_data(self, features):
        """Converts the flat `references` features back into the
        (source_sentence, {annotator_id: edits}) pairs expected by the scorer."""
        gold_data = []
        for entry in features:
            annotators = {}
            edits = entry['edits']
            # After feature encoding, `edits` is a dict of parallel lists
            # (one list per edit field), so iterate by index and group the
            # (from, to, text) tuples by annotator id.
            for i in range(len(edits['from'])):
                edit = (edits['from'][i], edits['to'][i], edits['text'][i])
                if edits['aid'][i] not in annotators:
                    annotators[edits['aid'][i]] = []
                annotators[edits['aid'][i]].append(edit)
            gold_data.append((entry['source_sentence'], annotators))
        return gold_data

    def load_m2_file(self, fpath):
        """Reads an .m2 file and returns references in the format expected by `compute`."""
        data = load_m2(fpath)
        result = []
        for src_sent, edits_ in data:
            edits = []
            for aid, annotator_edits in edits_.items():
                # Insert a (-1, -1) sentinel for annotators with no edits, so
                # they are preserved through the feature encoding instead of
                # being silently dropped.
                if len(annotator_edits) == 0:
                    edits.append({'from': -1, 'to': -1, 'text': [''], 'aid': aid})
                for from_, to_, text_ in annotator_edits:
                    edits.append({'from': from_, 'to': to_, 'text': text_, 'aid': aid})
            result.append({
                'source_sentence': src_sent,
                'edits': edits,
            })
        return result
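

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only). It assumes this script is saved locally as
# "m2.py" and that the `m2scorer` helper module is importable; the sentences
# and the single gold edit below are made-up examples, not real data.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    metric = evaluate.load("m2.py")  # hypothetical local path to this script

    # One hypothesis sentence, pre-tokenized with spaces.
    predictions = ['This is a sentence .']

    # One reference: the source sentence plus its gold edits. Annotator 0
    # replaces the token span [1, 2) ("are") with "is".
    references = [{
        'source_sentence': 'This are a sentence .',
        'edits': [{'from': 1, 'to': 2, 'text': ['is'], 'aid': 0}],
    }]

    # Prints a dict with keys 'f0.5', 'precision', and 'recall'.
    print(metric.compute(predictions=predictions, references=references))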