from datasets.features import Features, Value
from evaluate.module import EvaluationModuleInfo
import evaluate


class Suite(evaluate.EvaluationSuite):
    """Dummy evaluation suite used for tests."""

    def _info(self):
        return EvaluationModuleInfo(
            description="dummy metric for tests",
            citation="insert citation here",
            features=Features(
                {"predictions": Value("int64"), "references": Value("int64")}
            ),
        )

    def __init__(self):
        super().__init__()
        # Optional per-example preprocessing, e.g. lambda x: x["text"].lower()
        self.preprocessor = None
        self.suite = [
            evaluate.SubTask(
                data="imdb",
                split="test",
                data_preprocessor=self.preprocessor,
                args_for_task={
                    "metric": "accuracy",
                    "input_column": "text",
                    "label_column": "label",
                    "label_mapping": {"LABEL_0": 0.0, "LABEL_1": 1.0},
                },
            ),
            evaluate.SubTask(
                data="sst2",
                split="test[:10]",
                data_preprocessor=self.preprocessor,
                args_for_task={
                    "metric": "accuracy",
                    "input_column": "sentence",
                    "label_column": "label",
                    "label_mapping": {"LABEL_0": 0.0, "LABEL_1": 1.0},
                },
            ),
        ]
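

# Usage sketch (illustrative, not part of the original fixture): assuming the
# EvaluationSuite API exposes a `run(model_or_pipeline)` method, the suite above
# could be executed against a text-classification model as shown below. The
# checkpoint name is a placeholder for any compatible sentiment model.
if __name__ == "__main__":
    suite = Suite()
    results = suite.run("distilbert-base-uncased-finetuned-sst-2-english")
    print(results)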