File size: 4,744 Bytes
9b5b745
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
import csv
import os

import datasets

_CITATION = """\
@Dataset{wisdomify:storyteller,
title = {Korean proverb definitions and examples},
author={Jongyoon Kim, Yubin Kim, Yongtaek Im
},
year={2021}
}
"""

_DESCRIPTION = """\
This dataset is designed to provide forward and reverse dictionary of Korean proverbs.
"""

# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = ""

# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""

# TODO: Add link to the official dataset URLs here
#  If it is dropbox link, you must set 1 for query parameter "dl".
_URLs = {
    'definition': "https://www.dropbox.com/s/4uh564afaimtob3/definition.zip?dl=1",
    'example': "https://www.dropbox.com/s/adlt9n6x5gjs0a6/example.zip?dl=1",
}


class Story(datasets.GeneratorBasedBuilder):
    """Dataset builder for Korean proverb (wisdom) dictionaries.

    Two configurations are available:
      * "definition": rows of (wisdom, def) pairs -- a proverb and its definition.
      * "example":    rows of (wisdom, eg) pairs -- a proverb and a usage example.
    """

    # version must be "x.y.z' form
    VERSION = datasets.Version("0.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="definition", version=VERSION, description="definition"),
        datasets.BuilderConfig(name="example", version=VERSION, description="example"),
    ]

    # This config is applied when user load dataset without "name".
    DEFAULT_CONFIG_NAME = "definition"

    # Single source of truth for the config-name -> value-column mapping.
    # Previously this mapping was re-derived independently in _info,
    # _split_generators, and _generate_examples, with the validation
    # duplicated in two of them and missing from the third.
    _VALUE_COLUMN = {"definition": "def", "example": "eg"}

    def _value_column(self):
        """Return the value-column key ("def" or "eg") for the active config.

        Raises:
            NotImplementedError: if ``self.config.name`` is not a known config.
        """
        try:
            return self._VALUE_COLUMN[self.config.name]
        except KeyError:
            raise NotImplementedError(f"Wrong name: {self.config.name}") from None

    def _info(self):
        # This method specifies the datasets.DatasetInfo object which contains
        # information and typings for the dataset.
        features = datasets.Features(
            {
                "wisdom": datasets.Value("string"),
                # Second feature name follows the active config ("def"/"eg").
                self._value_column(): datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # dl_manager downloads and extracts the archive for the active config.
        # (Dropbox URLs must carry dl=1 -- see _URLs.)
        data_dir = dl_manager.download_and_extract(_URLs[self.config.name])
        col = self._value_column()

        # (datasets split, filename prefix, "split" gen_kwarg) triples --
        # the three generators differ only in these values.
        split_specs = [
            (datasets.Split.TRAIN, "train", "train"),
            (datasets.Split.VALIDATION, "val", "validation"),
            (datasets.Split.TEST, "test", "test"),
        ]
        return [
            # These gen_kwargs will be passed to _generate_examples.
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, f"{prefix}_wisdom2{col}.tsv"),
                    "split": split_kwarg,
                },
            )
            for split_name, prefix, split_kwarg in split_specs
        ]

    def _generate_examples(self, filepath, split):
        """Yields examples as (key, example) tuples from one TSV file.

        The `key` is here for legacy reason (tfds) and is not important in itself.
        """
        # Validate the config once, up front -- the original re-checked
        # self.config.name on every row of the file.
        col = self._value_column()

        with open(filepath, encoding="utf-8") as f:
            tsv_reader = csv.reader(f, delimiter="\t")
            for id_, row in enumerate(tsv_reader):
                if id_ == 0:
                    continue  # first row shows column info
                yield id_, {"wisdom": row[0], col: row[1]}