Datasets:
Tags:
License:
import os | |
import json | |
import datasets | |
from datasets import BuilderConfig, Features, ClassLabel, Value, Sequence | |
_DESCRIPTION = """ | |
# 한국어 지시학습 데이터셋 | |
- dbpedia_14 데이터셋을 한국어로 변역한 데이터셋 | |
""" | |
_CITATION = """ | |
@inproceedings{KITD, | |
title={언어 번역 모델을 통한 한국어 지시 학습 데이터 세트 구축}, | |
author={임영서, 추현창, 김산, 장진예, 정민영, 신사임}, | |
booktitle={제 35회 한글 및 한국어 정보처리 학술대회}, | |
pages={591--595}, | |
month=oct, | |
year={2023} | |
} | |
""" | |
# dbpedia_14
# Arrow feature schema for one (Korean-translated) dbpedia_14 example.
_DBPEDIA_14_FEATURES = Features({
    "data_index_by_user": Value(dtype="int32"),  # example index carried over from the source data
    "title": Value(dtype="string"),  # article title (presumably translated to Korean — see _DESCRIPTION)
    "content": Value(dtype="string"),  # article body text
    "label": Value(dtype="int32"),  # integer class id (dbpedia_14 label)
})
def _parsing_dbpedia_14(file_path): | |
with open(file_path, mode="r") as f: | |
dataset = json.load(f) | |
for _idx, data in enumerate(dataset): | |
_data_index_by_user = data["data_index_by_user"] | |
_title = data["title"] | |
_content = data["content"] | |
_label = data["label"] | |
yield _idx, { | |
"data_index_by_user": _data_index_by_user, | |
"title": _title, | |
"content": _content, | |
"label": _label, | |
} | |
class Dbpedia_14Config(BuilderConfig):
    """BuilderConfig for the Korean dbpedia_14 dataset.

    Bundles the feature schema together with the reading and parsing
    callables that the builder uses when generating examples.
    """

    def __init__(self, name, feature, reading_fn, parsing_fn, citation, **kwargs):
        # Pin every config of this script to version 1.0.0.
        super().__init__(name=name, version=datasets.Version("1.0.0"), **kwargs)
        # Per-config plumbing consumed by _generate_examples.
        self.citation = citation
        self.parsing_fn = parsing_fn
        self.reading_fn = reading_fn
        self.feature = feature
class DBPEDIA_14(datasets.GeneratorBasedBuilder):
    """Dataset builder for the Korean-translated dbpedia_14 dataset.

    Expects ``train.json`` and ``test.json`` under the manually supplied
    data directory (``dl_manager.manual_dir``).
    """

    BUILDER_CONFIGS = [
        Dbpedia_14Config(
            name="base",
            data_dir="./dbpedia_14",
            feature=_DBPEDIA_14_FEATURES,
            reading_fn=_parsing_dbpedia_14,
            parsing_fn=lambda x: x,  # identity: examples are already in final form
            citation=_CITATION,
        ),
    ]

    def _info(self) -> datasets.DatasetInfo:
        """Returns the dataset metadata."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=_DBPEDIA_14_FEATURES,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Returns SplitGenerators for the train and test JSON files."""
        # Plain strings: nothing to interpolate, so no f-prefix needed.
        path_kv = {
            datasets.Split.TRAIN: [
                os.path.join(dl_manager.manual_dir, "train.json")
            ],
            datasets.Split.TEST: [
                os.path.join(dl_manager.manual_dir, "test.json")
            ],
        }
        return [
            datasets.SplitGenerator(name=k, gen_kwargs={"path_list": v})
            for k, v in path_kv.items()
        ]

    def _generate_examples(self, path_list):
        """Yields (key, example) pairs read from each file in ``path_list``."""
        for path in path_list:
            try:
                # reading_fn already returns an iterator; no iter() needed.
                for example in self.config.reading_fn(path):
                    yield self.config.parsing_fn(example)
            except Exception as e:
                # Best-effort: skip an unreadable file, but name the file
                # instead of printing only the bare exception.
                print(f"Failed to read {path}: {e}")