# glue-suite-v2 / glue-suite-v2.py
# (Hub viewer metadata from extraction: mathemakitten's picture / "ugh" /
#  commit 140e94b / raw / history blame / 1.61 kB)
from datasets.features import Features, Sequence, Value
from evaluate.module import EvaluationModuleInfo
from evaluate.evaluation_suite import SubTask
import evaluate
class Suite(evaluate.EvaluationSuite):
    """Dummy evaluation suite (for tests) running text-classification subtasks.

    Evaluates a model on two datasets: the full ``imdb`` test split and the
    first 10 examples of ``sst2``, scoring with the ``accuracy`` metric.
    """

    def _info(self):
        """Return the EvaluationModuleInfo metadata describing this suite."""
        return EvaluationModuleInfo(
            module_type="evaluation_suite",
            description="dummy metric for tests",
            citation="insert citation here",
            features=Features({"predictions": Value("int64"), "references": Value("int64")}),
        )

    def setup(self):
        """Build the list of SubTasks this suite evaluates."""
        # Lowercases the raw review text; only valid for datasets with a
        # "text" column (imdb). sst2 uses a "sentence" column, so it gets
        # no preprocessor below.
        self.preprocessor = lambda x: x["text"].lower()
        self.suite = [
            SubTask(
                data="imdb",
                split="test",
                data_preprocessor=self.preprocessor,
                args_for_task={
                    "metric": "accuracy",
                    "input_column": "text",
                    "label_column": "label",
                    # Map pipeline output labels to the dataset's integer labels.
                    "label_mapping": {
                        "LABEL_0": 0.0,
                        "LABEL_1": 1.0,
                    },
                },
            ),
            SubTask(
                data="sst2",
                split="test[:10]",
                # NOTE(review): original left args_for_task commented out and the
                # preprocessor unset for this task; preserved as-is.
                data_preprocessor=None,
            ),
        ]