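"""A custom evaluate.EvaluationSuite that runs text-classification accuracy
checks on small slices of the imdb and sst2 datasets."""
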
import evaluate
from evaluate.evaluation_suite import SubTask


class Suite(evaluate.EvaluationSuite):

    def __init__(self, name):
        super().__init__(name)

    def setup(self):
        # Preprocessors are applied to each subtask's dataset with Dataset.map before
        # evaluation; each one lowercases the text column its subtask reads from.
        self.preprocessor = lambda x: {"text": x["text"].lower()}  # imdb uses a "text" column
        self.sst2_preprocessor = lambda x: {"sentence": x["sentence"].lower()}  # sst2 uses a "sentence" column
        self.suite = [
            SubTask(
                task_type="text-classification",
                data="imdb",
                split="test[:10]",
                data_preprocessor=self.preprocessor,
                args_for_task={
                    "metric": "accuracy",
                    "input_column": "text",
                    "label_column": "label",
                    "label_mapping": {
                        "LABEL_0": 0.0,
                        "LABEL_1": 1.0
                    }
                }
            ),
            SubTask(
                task_type="text-classification",
                data="sst2",
                split="test[:10]",
                data_preprocessor=self.sst2_preprocessor,
                args_for_task={
                    "metric": "accuracy",
                    "input_column": "sentence",
                    "label_column": "label",
                    "label_mapping": {
                        "LABEL_0": 0.0,
                        "LABEL_1": 1.0
                    }
                }
            )
        ]
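

# Usage sketch (not part of the original file): once this module is saved locally or
# pushed to a Hub repo, the suite can be loaded and run against any text-classification
# model or pipeline. The suite path and model name below are placeholders, not values
# defined by this file.
#
#   from evaluate import EvaluationSuite
#   suite = EvaluationSuite.load("<path-or-hub-id-of-this-suite>")
#   results = suite.run("distilbert-base-uncased-finetuned-sst-2-english")
#   print(results)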