Spaces:
Running
Running
File size: 1,613 Bytes
9b970f9 ff88b1e 6d7d1e3 411af2b 3d24166 ff88b1e 411af2b aae7167 e2828b6 0fa8793 23752a5 32cfbbc 140e94b a0a9216 140e94b 26647e7 140e94b a0a9216 140e94b |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 |
import evaluate
from evaluate.evaluation_suite import SubTask
from typing import Optional, Union, Callable
from dataclasses import dataclass
from datasets import Dataset
# @dataclass
# class SubTask:
# data: Optional[Union[str, Dataset]] = None
# subset: Optional[str] = None
# split: Optional[str] = None
# data_preprocessor: Optional[Callable] = None
# args_for_task: Optional[dict] = None
class Suite(evaluate.EvaluationSuite):
    """Evaluation suite scoring accuracy on small slices of imdb and sst2.

    Each :class:`SubTask` evaluates the first 10 examples of the dataset's
    test split, mapping model outputs ``LABEL_0``/``LABEL_1`` onto the
    numeric labels ``0.0``/``1.0``.
    """

    def __init__(self, name):
        """Forward the suite *name* to the EvaluationSuite base class."""
        super().__init__(name)

    def setup(self):
        """Build the list of sub-tasks this suite will run.

        Populates ``self.suite`` with one SubTask per dataset. A shared
        preprocessor slot is kept on the instance but deliberately left
        unset (``None``): imdb lowercases its ``"text"`` column with an
        inline lambda, which would not apply to sst2's ``"sentence"``
        column, so sst2 receives its examples unmodified.
        """
        # No shared preprocessor; see docstring for why each task differs.
        self.preprocessor = None
        self.suite = [
            SubTask(
                data="imdb",
                split="test[:10]",
                # Lowercase the review text before it reaches the model.
                data_preprocessor=lambda x: x["text"].lower(),
                args_for_task={
                    "metric": "accuracy",
                    "input_column": "text",
                    "label_column": "label",
                    "label_mapping": {
                        "LABEL_0": 0.0,
                        "LABEL_1": 1.0,
                    },
                },
            ),
            SubTask(
                data="sst2",
                split="test[:10]",
                # None: sentences are passed through unmodified.
                data_preprocessor=self.preprocessor,
                args_for_task={
                    "metric": "accuracy",
                    "input_column": "sentence",
                    "label_column": "label",
                    "label_mapping": {
                        "LABEL_0": 0.0,
                        "LABEL_1": 1.0,
                    },
                },
            ),
        ]