from dataclasses import dataclass
from typing import Callable, Optional, Union

import evaluate
from datasets import Dataset
from datasets.features import Features, Value
from evaluate.module import EvaluationModuleInfo

# SubTask normally comes from evaluate.evaluation_suite; it is redefined below
# so this dummy suite stays self-contained.
# from evaluate.evaluation_suite import SubTask

@dataclass
class SubTask:
    """One dataset plus its evaluation configuration within the suite."""

    data: Optional[Union[str, Dataset]] = None
    subset: Optional[str] = None
    split: Optional[str] = None
    data_preprocessor: Optional[Callable] = None
    args_for_task: Optional[dict] = None
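
# For reference, EvaluationSuite.run() consumes these fields roughly as
# follows (a sketch of the evaluate-library flow, not a verbatim copy):
#
#     ds = load_dataset(task.data, name=task.subset, split=task.split)
#     if task.data_preprocessor:
#         ds = ds.map(task.data_preprocessor)  # the function must return a dict
#     results = task_evaluator.compute(data=ds, **task.args_for_task)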


class Suite(evaluate.EvaluationSuite):
    def _info(self):
        return EvaluationModuleInfo(
            module_type="evaluation_suite",
            description="Dummy evaluation suite for tests.",
            citation="insert citation here",
            features=Features({"predictions": Value("int64"), "references": Value("int64")}),
        )

    def setup(self):
        # No shared preprocessor; each SubTask may supply its own
        # (e.g. lambda x: {"text": x["text"].lower()}).
        self.preprocessor = None
        self.suite = [
            SubTask(
                data="imdb",
                split="test[:10]",
                # Dataset.map expects the function to return a dict, so the
                # lowercased text is wrapped in one rather than returned bare.
                data_preprocessor=lambda x: {"text": x["text"].lower()},
                args_for_task={
                    "metric": "accuracy",
                    "input_column": "text",
                    "label_column": "label",
                    "label_mapping": {
                        "LABEL_0": 0.0,
                        "LABEL_1": 1.0,
                    },
                },
            ),
            SubTask(
                data="sst2",
                # Note: the sst2 test split is unlabeled (label == -1); use a
                # validation slice if meaningful accuracy numbers are needed.
                split="test[:10]",
                data_preprocessor=self.preprocessor,
                args_for_task={
                    "metric": "accuracy",
                    "input_column": "sentence",
                    "label_column": "label",
                    "label_mapping": {
                        "LABEL_0": 0.0,
                        "LABEL_1": 1.0,
                    },
                },
            ),
        ]
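

# Usage sketch: once this suite is saved as a module (locally or on the Hub),
# it can be loaded and run against a text-classification model or pipeline.
# The path and model id below are placeholders, not part of this repository:
#
#     from evaluate import EvaluationSuite
#     suite = EvaluationSuite.load("path/to/this/suite")
#     results = suite.run("distilbert-base-uncased-finetuned-sst-2-english")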