Datasets: sileod/mindgames
Commit: Update mindgames.py

mindgames.py (+21 −85)
@@ -25,104 +25,40 @@ DESCRIPTION = """\

Before:

mindgames json tasks
"""

CONFIGS=['forehead','forehead-mirror','explicit','internal','all']

class mindgames_Config(datasets.BuilderConfig):
    """BuilderConfig for mindgames."""

    def __init__(
        self,
        text_features,
        label_classes=None,
        **kwargs,
    ):
        """BuilderConfig for mindgames.
        Args:
          text_features: `dict[string, string]`, map from the name of the feature
            dict for each text field to the name of the column in the tsv file
          data_url: `string`, url to download the zip file from
          data_dir: `string`, the path to the folder containing the tsv files in the
            downloaded zip
          citation: `string`, citation for the data set
          url: `string`, url for information about the data set
        """

        super(mindgames_Config, self).__init__(
            version=datasets.Version("1.0.0", ""), **kwargs
        )

        self.text_features = text_features
        self.data_url = DATA_URL
        self.data_dir = self.name  # os.path.join("", self.name)
        self.citation = textwrap.dedent(CITATION)
        self.description = ""


class mindgames(datasets.GeneratorBasedBuilder):

    """The General Language Understanding Evaluation (mindgames) benchmark."""

    BUILDER_CONFIG_CLASS = mindgames_Config
    DEFAULT_CONFIG_NAME = "all"

    BUILDER_CONFIGS = [
        ...
    ]

    def _info(self):
        features = {
            "hypothesis": datasets.Value("string"),
            "premise": datasets.Value("string"),
            "label": datasets.Value("int32"),
            "n_announcements": datasets.Value("int32"),
            "n_agents": datasets.Value("int32"),
            "hypothesis_depth": datasets.Value("int32"),
            "index": datasets.Value("int32")
        }
        for k in ['smcdel_problem',"pbcheck","names","setup","s-l","deberta_pred","deberta_confidence","difficulty"]:
            features[k]=datasets.Value("string")
        return datasets.DatasetInfo(
            description=DESCRIPTION,
            features=datasets.Features(features),
            citation=self.config.citation + "\n" + CITATION,
        )

    def _split_generators(self, dl_manager):
        data_dir = dl_manager.download_and_extract(self.config.data_url)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_file": f"{data_dir}/train-{self.config.name}.jsonl",
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "data_file": f"{data_dir}/validation-{self.config.name}.jsonl",
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "data_file": f"{data_dir}/test-{self.config.name}.jsonl",
                    "split": "validation",
                },
            ),
        ]

    def _generate_examples(self, data_file, split):
        """Yields examples."""
        with open(data_file, encoding="utf-8") as f:
            for id_, line in enumerate(f):
                line_dict = json.loads(line)
                yield id_, line_dict
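The rewrite below drops the per-config builder plumbing and the hand-declared feature schema in favor of a module-level `_URLs` dict that points directly at per-split, per-config JSONL files. A minimal sketch of what that comprehension evaluates to (the repo id and file layout are taken from the URL in the diff; only the 'internal' entries out of the 3 splits x 5 configs = 15 pairs are shown):

# Sketch only: _URLs maps (split, config) tuples to direct file URLs.
_URLs = {
    ("train", "internal"): "https://huggingface.co/datasets/sileod/mindgames/resolve/main/data/train-internal.jsonl",
    ("validation", "internal"): "https://huggingface.co/datasets/sileod/mindgames/resolve/main/data/validation-internal.jsonl",
    ("test", "internal"): "https://huggingface.co/datasets/sileod/mindgames/resolve/main/data/test-internal.jsonl",
}
# dl_manager.download() accepts nested structures and preserves their shape, so
# dl_manager.download(_URLs) returns a dict with the same (split, config) keys
# mapped to local cache paths, which is why _split_generators can index it as
# data_file['train', self.config.data_dir].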
After:

mindgames json tasks
"""

CONFIGS=['forehead','forehead_mirror','explicit','internal','all']
_URLs = {(x,y):f'https://huggingface.co/datasets/sileod/mindgames/resolve/main/data/{x}-{y}.jsonl' for x in ['train','validation','test'] for y in CONFIGS}
files = ['-'.join(x) for x in _URLs]


class mindgamesConfig(datasets.BuilderConfig):
    citation=CITATION

class mindgames(datasets.GeneratorBasedBuilder):

    BUILDER_CONFIGS = [
        mindgamesConfig(
            name=n,
            data_dir=n
        ) for n in CONFIGS
    ]

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        data_file = dl_manager.download(_URLs)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_file['train',self.config.data_dir]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_file['validation',self.config.data_dir]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_file['test',self.config.data_dir]}),
        ]

    def _info(self):
        return datasets.DatasetInfo()

    def _generate_examples(self, filepath):
        print(filepath)
        """Yields examples."""
        with open(filepath, "r", encoding="utf-8") as f:
            for id_, line in enumerate(f):
                line_dict = json.loads(line)
                yield id_, line_dict
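With the new script, the dataset loads like any other Hub dataset. A minimal usage sketch, assuming the sileod/mindgames repo id from the URLs above and picking 'internal' as an illustrative config name from CONFIGS:

from datasets import load_dataset

# Any name in CONFIGS is a valid config. Because the new _info() returns an
# empty DatasetInfo(), features are inferred from the JSONL records rather
# than declared up front.
dataset = load_dataset("sileod/mindgames", "internal")

print(dataset)                          # DatasetDict with train/validation/test
print(dataset["train"][0]["premise"])   # fields mirror the JSONL records

Note the tradeoff: the removed _info() declared explicit int32 columns (label, n_agents, and so on); with an empty DatasetInfo(), those types become whatever the JSON reader infers, typically int64.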