import json
import os

import datasets
import pandas as pd


class semiRelConfig(datasets.BuilderConfig):
    """BuilderConfig for the semi-Rel dataset.

    Carries the feature schema and the remote location of the data files
    for one of the three configurations ("pairs", "source", "target").
    """

    def __init__(self, features, data_url, **kwargs):
        super(semiRelConfig, self).__init__(**kwargs)
        self.features = features
        self.data_url = data_url


class semiRel(datasets.GeneratorBasedBuilder):
    """Entity-matching dataset with a relational source table (CSV),
    a semi-structured target table (JSON), and labeled id pairs."""

    BUILDER_CONFIGS = [
        # Labeled (ltable_id, rtable_id) pairs, split into train/valid/test CSV files.
        semiRelConfig(
            name="pairs",
            features={
                "ltable_id": datasets.Value("string"),
                "rtable_id": datasets.Value("string"),
                "label": datasets.Value("string"),
            },
            data_url="https://huggingface.co/datasets/matchbench/semi-Rel/resolve/main/",
        ),
        # Relational source table: one record per CSV row.
        semiRelConfig(
            name="source",
            features={
                "id": datasets.Value("string"),
                "title": datasets.Value("string"),
                "director": datasets.Value("string"),
                "actors": datasets.Value("string"),
                "year": datasets.Value("string"),
                "rating": datasets.Value("string"),
                "information": datasets.Value("string"),
            },
            data_url="https://huggingface.co/datasets/matchbench/semi-Rel/resolve/main/left.csv",
        ),
        # Semi-structured target table: each JSON record is serialized into a single string field.
        semiRelConfig(
            name="target",
            features={
                "content": datasets.Value("string"),
            },
            data_url="https://huggingface.co/datasets/matchbench/semi-Rel/resolve/main/right.json",
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features(self.config.features),
        )

    def _split_generators(self, dl_manager):
        # The "pairs" config has three splits; "source" and "target" each expose a single split.
        if self.config.name == "pairs":
            return [
                datasets.SplitGenerator(
                    name=split,
                    gen_kwargs={
                        "path_file": dl_manager.download_and_extract(
                            os.path.join(self.config.data_url, f"{split}.csv")
                        ),
                        "split": split,
                    },
                )
                for split in ["train", "valid", "test"]
            ]
        if self.config.name == "source":
            return [
                datasets.SplitGenerator(
                    name="source",
                    gen_kwargs={
                        "path_file": dl_manager.download_and_extract(self.config.data_url),
                        "split": "source",
                    },
                )
            ]
        if self.config.name == "target":
            return [
                datasets.SplitGenerator(
                    name="target",
                    gen_kwargs={
                        "path_file": dl_manager.download_and_extract(self.config.data_url),
                        "split": "target",
                    },
                )
            ]

    def _generate_examples(self, path_file, split):
        if split == "target":
            # The target side is a JSON list; each record is re-serialized as a string.
            with open(path_file, "r", encoding="utf-8") as f:
                records = json.load(f)
            for i, record in enumerate(records):
                yield i, {"content": json.dumps(record)}
        elif split == "source":
            # The source side is a CSV table with one record per row.
            table = pd.read_csv(path_file)
            for i, row in table.iterrows():
                yield i, {
                    "id": row["id"],
                    "title": row["title"],
                    "director": row["director"],
                    "actors": row["actors"],
                    "year": row["year"],
                    "rating": row["rating"],
                    "information": row["information"],
                }
        else:
            # train / valid / test: labeled id pairs.
            table = pd.read_csv(path_file)
            for i, row in table.iterrows():
                yield i, {
                    "ltable_id": row["ltable_id"],
                    "rtable_id": row["rtable_id"],
                    "label": row["label"],
                }
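

# --- Usage sketch (comments only, not part of the loading script) ---
# A minimal example of how the three configurations defined above could be
# loaded with `datasets.load_dataset`. It assumes this script serves as the
# loading script for the `matchbench/semi-Rel` repository referenced in the
# data URLs; adjust the repo id or local path to wherever the script lives.
# Depending on the `datasets` version, `trust_remote_code=True` may be
# required for script-based datasets.
#
#     import datasets
#
#     # Labeled id pairs with train / valid / test splits.
#     pairs = datasets.load_dataset("matchbench/semi-Rel", name="pairs")
#
#     # The relational source table (single "source" split).
#     source = datasets.load_dataset("matchbench/semi-Rel", name="source")
#
#     # The semi-structured target table (single "target" split).
#     target = datasets.load_dataset("matchbench/semi-Rel", name="target")
#
#     print(pairs["train"][0])  # {"ltable_id": ..., "rtable_id": ..., "label": ...}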