from __future__ import absolute_import, division, print_function

import datasets


_URL = "data/"
_URLs = {
    "train": _URL + "train.tsv",
    "valid": _URL + "valid.tsv",
    "test": _URL + "test.tsv",
}


class TalktalkSentiment_210713_multi_singleturn_custom_multiturn(datasets.GeneratorBasedBuilder):
    """TalkTalk Sentiment Classification Multiturn Dataset (210713 version)."""

    def _info(self):
        return datasets.DatasetInfo(
            description="TalkTalk Sentiment Classification Multiturn Dataset (210713 Version)",
            features=datasets.Features(
                {
                    # Single-turn utterance and its sentiment label.
                    # Labels: 매우 긍정 (very positive), 다소 긍정 (somewhat positive),
                    # 중립 (neutral), 다소 부정 (somewhat negative),
                    # 매우 부정 (very negative), 모름 (unknown).
                    "text": datasets.Value("string"),
                    "label": datasets.features.ClassLabel(
                        names=["매우 긍정", "다소 긍정", "중립", "다소 부정", "매우 부정", "모름"]
                    ),
                    # Multi-turn utterance, its label, and the accumulated label.
                    "text_2": datasets.Value("string"),
                    "label_2": datasets.Value("string"),
                    "acc_label": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            license="",
            homepage="",
            citation="",
        )

    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download_and_extract(_URLs)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["valid"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"]},
            ),
        ]

    def _generate_examples(self, filepath):
        with open(filepath, "r", encoding="UTF-8") as f:
            for idx, line in enumerate(f):
                fields = line.rstrip("\n").split("\t")
                if len(fields) == 2:
                    # Two columns: single-turn example (text, label).
                    text, label = fields
                    yield idx, {
                        "text": text.strip(),
                        "label": label.strip(),
                        "text_2": "",
                        "label_2": "",
                        "acc_label": "",
                    }
                else:
                    # Three columns: multi-turn example (text, label, accumulated label);
                    # the single-turn label falls back to "모름" (unknown).
                    text, label, acc_label = fields
                    yield idx, {
                        "text": "",
                        "label": "모름",
                        "text_2": text.strip(),
                        "label_2": label.strip(),
                        "acc_label": acc_label.strip(),
                    }
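

# ---------------------------------------------------------------------------
# Example usage (a minimal sketch, not part of the loading script itself).
# It assumes this file is saved locally as "talktalk_sentiment.py" next to a
# "data/" directory containing train.tsv, valid.tsv, and test.tsv; the script
# file name is an assumption, and newer versions of the datasets library may
# additionally require trust_remote_code=True to run a local loading script.
#
#     from datasets import load_dataset
#
#     dataset = load_dataset("talktalk_sentiment.py")
#     print(dataset["train"][0])
#     # e.g. {"text": "...", "label": 2, "text_2": "", "label_2": "", "acc_label": ""}
# ---------------------------------------------------------------------------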