File size: 2,128 Bytes
1075a6f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8a7c4e4
1075a6f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
60c4f83
1075a6f
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
import json

import datasets

# Long-form dataset-card text surfaced through `datasets.DatasetInfo`.
# This literal (markdown formatting and citation marker included) is
# user-facing output — do not edit its content, only this comment.
_DESCRIPTION = """\
We introduce **2D-ATOMS** dataset, a novel text-based dataset that evaluates machine's reasoning process under situated theory-of-mind setting.

Our dataset includes 9 different ToM evaluation tasks for each mental state under ATOMS[1], and 1 reality-checking task to test LLMs’ understanding of the world. It is important to acknowledge that our experiment serves as a proof of concept and does not aim to cover the entire spectrum of machine ToM, as our case studies are far from being exhaustive or systematic. Here we release the zero-shot version of our dataset, which is used in our paper.
"""

class TRIP(datasets.GeneratorBasedBuilder):
    """Loader for the 2D-ATOMS theory-of-mind evaluation dataset.

    Exposes ten splits — one reality-check task plus nine ATOMS
    mental-state tasks — where every example is a prompt/answer pair of
    strings read from a per-task JSON file.

    NOTE(review): the class is named ``TRIP`` while ``_DESCRIPTION``
    refers to 2D-ATOMS — presumably copied from another loader script.
    The name is part of the public interface, so it is left unchanged;
    confirm the intended name with the dataset owners before renaming.
    """

    VERSION = datasets.Version("1.0.1")

    def _info(self):
        """Return dataset metadata: description plus prompt/answer features."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "prompt": datasets.Value("string"),
                    "answer": datasets.Value("string"),
                }
            ),
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Download one JSON file per task and return a split for each."""
        task_list = [
            "task0_reality_check",
            "task1_short_term_intention",
            "task2_long_term_intention",
            "task3_desire",
            "task4_perception",
            "task5_first_order_belief",
            "task6_second_order_belief",
            "task7_non_literal_communication",
            "task8_knowledge",
            "task9_emotions",
        ]
        # Maps each task name to the local path of its downloaded file;
        # the data files live next to this script as "<task>.json".
        data_roots = dl_manager.download_and_extract({k: f"{k}.json" for k in task_list})
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "filepath": data_roots[split],
                },
            )
            for split in task_list
        ]

    def _generate_examples(self, filepath):
        """Yield ``(index, example)`` pairs from one task's data file.

        The file is a single JSON array of example dicts (plain JSON, not
        JSONL — ``json.load`` parses the whole document at once; the old
        "jsonl" comment was misleading).
        """
        # Explicit encoding so parsing does not depend on the platform's
        # default locale (previously `open(filepath)` with no encoding).
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
        for i, example in enumerate(data):
            yield i, example