# coding=utf-8
# Lint as: python3
"""MindGames datasets."""

from __future__ import absolute_import, division, print_function

import json
import textwrap

import datasets

CITATION = r"""
@article{sileo2023mindgames,
  title={MindGames: Targeting Theory of Mind in Large Language Models with Dynamic Epistemic Modal Logic},
  author={Sileo, Damien and Lernould, Antoine},
  journal={arXiv preprint arXiv:2305.03353},
  year={2023}
}
"""

DESCRIPTION = """\
MindGames theory-of-mind tasks, distributed as JSON Lines files.
"""

DATA_URL = "https://www.dropbox.com/s/yamo3jsgzdhkim8/mindgames.zip?dl=1"

CONFIGS = ["internal", "forehead", "forehead-mirror", "explicit"]


class mindgames_Config(datasets.BuilderConfig):
    """BuilderConfig for MindGames."""

    def __init__(self, text_features, label_classes=None, **kwargs):
        """BuilderConfig for MindGames.

        Args:
          text_features: `dict[string, string]`, map from the name of each text
            feature to the name of the corresponding column in the JSON Lines file.
          label_classes: optional list of label names (unused for these tasks).
          **kwargs: keyword arguments forwarded to super.
        """
        super(mindgames_Config, self).__init__(
            version=datasets.Version("1.0.0", ""), **kwargs
        )
        self.text_features = text_features
        self.label_classes = label_classes
        self.data_url = DATA_URL
        self.data_dir = self.name
        self.citation = textwrap.dedent(CITATION)
        self.description = ""


class mindgames(datasets.GeneratorBasedBuilder):
    """MindGames: theory-of-mind reasoning tasks grounded in dynamic epistemic modal logic."""

    BUILDER_CONFIG_CLASS = mindgames_Config
    BUILDER_CONFIGS = [
        mindgames_Config(
            name=name,
            text_features={"inputs": "inputs"},
        )
        for name in CONFIGS
    ]

    def _info(self):
        # Features are not declared explicitly; they are inferred from the
        # JSON Lines records at generation time.
        return datasets.DatasetInfo(
            description=DESCRIPTION,
            citation=self.config.citation,
        )

    def _split_generators(self, dl_manager):
        data_dir = dl_manager.download_and_extract(self.config.data_url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_file": f"{data_dir}/train-{self.config.name}.jsonl",
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "data_file": f"{data_dir}/validation-{self.config.name}.jsonl",
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "data_file": f"{data_dir}/test-{self.config.name}.jsonl",
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, data_file, split):
        """Yields examples, one per line in the JSON Lines file."""
        with open(data_file, "r", encoding="utf-8") as f:
            for id_, line in enumerate(f):
                yield id_, json.loads(line)
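

if __name__ == "__main__":
    # Minimal usage sketch, not part of the loading script proper: it assumes
    # a `datasets` version that still supports loading local dataset scripts
    # by path (newer releases may require `trust_remote_code=True` or have
    # dropped script-based loading altogether). Adjust the config name
    # ("internal", "forehead", "forehead-mirror", or "explicit") as needed.
    dataset = datasets.load_dataset(__file__, "internal")
    print(dataset)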