import json

import datasets
import pandas as pd

_CITATION = """"""

_DESCRIPTION = """"""

_LICENSE = "CC-BY-SA-4.0"

# _URL = "https://github.com/boostcampaitech2/data-annotation-nlp-level3-nlp-14"
_DATA_URLS = {
    "train": "https://huggingface.co/datasets/raki-1203/ai_hub_summarization/resolve/main/train_dict.json",
    "dev": "https://huggingface.co/datasets/raki-1203/ai_hub_summarization/resolve/main/valid_dict.json",
}

_VERSION = "0.0.0"


class AiHubSummarizationConfig(datasets.BuilderConfig):
    def __init__(self, data_url, **kwargs):
        super().__init__(version=datasets.Version(_VERSION), **kwargs)
        self.data_url = data_url


class AiHubSummarization(datasets.GeneratorBasedBuilder):
    DEFAULT_CONFIG_NAME = "ai_hub_summarization"
    BUILDER_CONFIGS = [
        AiHubSummarizationConfig(
            name="ai_hub_summarization",
            data_url=_DATA_URLS,
            description=_DESCRIPTION,
        )
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "data_name": datasets.Value("string"),
                    "doc_id": datasets.Value("string"),
                    "doc_name": datasets.Value("string"),
                    "passage": datasets.Value("string"),
                    "abstract_summary": datasets.Value("string"),
                }
            ),
            license=_LICENSE,
            citation=_CITATION,
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # self.config.data_url is a dict, so download_and_extract returns a
        # dict mapping each split key ("train", "dev") to a local file path.
        data_file = dl_manager.download_and_extract(self.config.data_url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_file": data_file["train"],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "data_file": data_file["dev"],
                    "split": "valid",
                },
            ),
        ]

    def _generate_examples(self, data_file: str, split: str):
        """Yields examples."""
        with open(data_file, encoding="UTF-8") as f:
            json_file = json.load(f)

        # Load the JSON into a DataFrame and yield one example per row,
        # using the row index as the unique example key.
        df = pd.DataFrame(json_file)
        for idx, row in df.iterrows():
            features = {
                "data_name": row["data_name"],
                "doc_id": row["doc_id"],
                "doc_name": row["doc_name"],
                "passage": row["passage"],
                "abstract_summary": row["abstract_summary"],
            }
            yield idx, features
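

# ---------------------------------------------------------------------------
# Usage sketch (not part of the builder itself): a minimal example of loading
# this dataset through `datasets.load_dataset`. It assumes the script is saved
# locally as `ai_hub_summarization.py` (the file name is an illustration, not
# defined above) and that the installed `datasets` version still supports
# script-based loading.
if __name__ == "__main__":
    from datasets import load_dataset

    # Pointing load_dataset at the local script downloads the JSON files in
    # _DATA_URLS and builds the "train" and "validation" splits.
    dataset = load_dataset("./ai_hub_summarization.py")
    print(dataset)               # DatasetDict with train/validation splits
    print(dataset["train"][0])   # one example with the five string features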