File size: 3,098 Bytes
9bcd0c4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
import json
import logging
import os

import datasets
from datasets import BuilderConfig, ClassLabel, Features, Sequence, Value


# Human-readable dataset card text (Korean). Rough translation:
# "Korean instruction-learning dataset — the dbpedia_14 dataset
# translated into Korean."
_DESCRIPTION = """
# 한국어 지시학습 데이터셋
- dbpedia_14 데이터셋을 한국어로 변역한 데이터셋
"""

# BibTeX citation for the paper that produced this dataset
# (35th Annual Conference on Human & Cognitive Language Technology, 2023).
_CITATION = """
@inproceedings{KITD,
  title={언어 번역 모델을 통한 한국어 지시 학습 데이터 세트 구축},
  author={임영서, 추현창, 김산, 장진예, 정민영, 신사임},
  booktitle={제 35회 한글 및 한국어 정보처리 학술대회},
  pages={591--595},
  month=oct,
  year={2023}
}
"""

# dbpedia_14
# Feature schema for each example: a per-user running index, the article
# title and body text, and an integer category label. NOTE(review): the
# label is stored as a plain int32, not a ClassLabel — presumably the 14
# DBpedia ontology classes; confirm against the source data.
_DBPEDIA_14_FEATURES = Features({
    "data_index_by_user": Value(dtype="int32"),
    "title": Value(dtype="string"),
    "content": Value(dtype="string"),
    "label": Value(dtype="int32"),
})

def _parsing_dbpedia_14(file_path):
    with open(file_path, mode="r") as f:
        dataset = json.load(f)
    for _idx, data in enumerate(dataset):
        _data_index_by_user = data["data_index_by_user"]
        _title = data["title"]
        _content = data["content"]
        _label = data["label"]
        
        yield _idx, {
            "data_index_by_user": _data_index_by_user,
            "title": _title,
            "content": _content,
            "label": _label,
        }

class Dbpedia_14Config(BuilderConfig):
    """BuilderConfig that bundles the feature schema, reader/parser
    callables, and citation for one configuration of the dataset.
    """

    def __init__(self, name, feature, reading_fn, parsing_fn, citation, **kwargs):
        """Initialize the config.

        Args:
            name: Configuration name (e.g. ``"base"``).
            feature: ``datasets.Features`` schema for the examples.
            reading_fn: Callable that reads a file path and yields examples.
            parsing_fn: Callable applied to each yielded example.
            citation: BibTeX citation string for this configuration.
            **kwargs: Forwarded to ``BuilderConfig`` (e.g. ``data_dir``).
        """
        super().__init__(name=name, version=datasets.Version("1.0.0"), **kwargs)
        # Stash the per-config callbacks so the builder can stay generic.
        self.citation = citation
        self.feature = feature
        self.parsing_fn = parsing_fn
        self.reading_fn = reading_fn

class DBPEDIA_14(datasets.GeneratorBasedBuilder):
    """Builder for the Korean-translated dbpedia_14 dataset.

    Expects ``train.json`` and ``test.json`` under the manually supplied
    data directory (``dl_manager.manual_dir``).
    """

    BUILDER_CONFIGS = [
        Dbpedia_14Config(
            name="base",
            data_dir="./dbpedia_14",
            feature=_DBPEDIA_14_FEATURES,
            reading_fn=_parsing_dbpedia_14,
            parsing_fn=lambda x: x,  # identity: examples are already formatted
            citation=_CITATION,
        ),
    ]

    def _info(self) -> datasets.DatasetInfo:
        """Returns the dataset metadata."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=_DBPEDIA_14_FEATURES,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Returns SplitGenerators mapping train/test splits to their JSON files."""
        path_kv = {
            datasets.Split.TRAIN: [
                os.path.join(dl_manager.manual_dir, "train.json")
            ],
            datasets.Split.TEST: [
                os.path.join(dl_manager.manual_dir, "test.json")
            ],
        }
        return [
            datasets.SplitGenerator(name=k, gen_kwargs={"path_list": v})
            for k, v in path_kv.items()
        ]

    def _generate_examples(self, path_list):
        """Yields ``(key, example)`` pairs from each file in *path_list*.

        Keeps the original best-effort semantics — a failure on one file
        skips that file and continues with the rest — but records the full
        traceback via logging instead of a bare ``print`` that loses it.
        """
        for path in path_list:
            try:
                for example in self.config.reading_fn(path):
                    yield self.config.parsing_fn(example)
            except Exception:
                # Log with traceback so data problems are diagnosable;
                # deliberately not re-raised to preserve skip-and-continue.
                logging.exception("Failed to generate examples from %s", path)