import os
import json

import datasets

_DESCRIPTION = """ """

_CITATION = """ """

_LICENCE = """ """

# Every task config shares the same two-column schema: a source text "x"
# and a target text "y".
_FEATURES = datasets.Features(
    {
        "x": datasets.Value(dtype="string"),
        "y": datasets.Value(dtype="string"),
    }
)

# Library-convention logger instead of bare print() for warnings.
logger = datasets.logging.get_logger(__name__)


class GenMix50kConfig(datasets.BuilderConfig):
    """BuilderConfig for one GenMix50k task.

    Args:
        features: `datasets.Features` describing the example schema.
        citation: Citation string surfaced through `DatasetInfo`.
        **kwargs: Forwarded to `datasets.BuilderConfig` (e.g. `name`,
            `data_dir`).
    """

    def __init__(self, features, citation, **kwargs):
        # Pin a single dataset version for all task configs.
        super(GenMix50kConfig, self).__init__(
            version=datasets.Version("1.0.3"), **kwargs
        )
        self.features = features
        self.citation = citation


class GenMix50k(datasets.GeneratorBasedBuilder):
    """Manual-download builder exposing three generation tasks.

    Data is expected under ``<manual_dir>/<config name>/`` as JSON-Lines
    files named ``train.json``, ``valid.json``, and ``test.json``; any of
    the three splits may be absent.
    """

    BUILDER_CONFIGS = [
        GenMix50kConfig(
            name="translation",
            data_dir="./translation",
            features=_FEATURES,
            citation=_CITATION,
        ),
        GenMix50kConfig(
            name="dialogue",
            data_dir="./dialogue",
            features=_FEATURES,
            citation=_CITATION,
        ),
        GenMix50kConfig(
            name="summarization",
            data_dir="./summarization",
            features=_FEATURES,
            citation=_CITATION,
        ),
    ]

    def _info(self) -> datasets.DatasetInfo:
        """Returns the dataset metadata."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=self.config.features,
            citation=self.config.citation,
            license=_LICENCE,
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Returns SplitGenerators for whichever split files exist on disk.

        Raises:
            ValueError: If no manual data directory was supplied.
            datasets.data_files.DataFilesNotFoundError: If none of the
                expected split files exist under the config's directory.
        """
        # manual_dir is None when the user did not pass data_dir to
        # load_dataset(); fail with a clear message instead of letting
        # os.path.join raise an opaque TypeError.
        if dl_manager.manual_dir is None:
            raise ValueError(
                "This dataset requires manual data. Please pass "
                "`data_dir=<path>` to `load_dataset` pointing at the "
                "directory containing the per-task subdirectories."
            )
        data_dir = os.path.join(dl_manager.manual_dir, self.config.name)
        path_kv = {
            datasets.Split.TRAIN: os.path.join(data_dir, "train.json"),
            datasets.Split.VALIDATION: os.path.join(data_dir, "valid.json"),
            datasets.Split.TEST: os.path.join(data_dir, "test.json"),
        }

        # Only emit generators for files that actually exist; missing
        # splits are tolerated but logged.
        splits = []
        for split, path in path_kv.items():
            if os.path.isfile(path):
                splits.append(
                    datasets.SplitGenerator(
                        name=split, gen_kwargs={"filepath": path}
                    )
                )
            else:
                logger.warning(
                    "File not found for %s split: %s", split, path
                )

        if not splits:
            # List the split names explicitly rather than interpolating
            # dict_keys(...) repr into the message.
            raise datasets.data_files.DataFilesNotFoundError(
                f"No valid data files found in {data_dir} for splits: "
                f"{list(path_kv)}"
            )
        return splits

    def _generate_examples(self, filepath):
        """Yields (index, example) pairs from a JSON-Lines file.

        Each line of `filepath` must be a standalone JSON object matching
        `_FEATURES` (keys "x" and "y").
        """
        with open(filepath, encoding="utf-8") as f:
            for idx, line in enumerate(f):
                example = json.loads(line)
                yield idx, example