File size: 2,930 Bytes
0896b16
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23cc825
0896b16
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
import datasets


# Description shown on the datasets hub page (intentionally left empty here).
_DESCRIPTION = """\

"""
# Homepage: the HTML edition of the book on Project Gutenberg.
_URL = "https://www.gutenberg.org/files/2554/2554-h/2554-h.htm"
# Raw plain-text copy of the book that is actually downloaded and parsed.
_DATA_URL = "https://raw.githubusercontent.com/patrickvonplaten/datasets/master/crime_and_punishment.txt"


class CrimeAndPunishConfig(datasets.BuilderConfig):
    """BuilderConfig for Crime and Punish."""

    def __init__(self, data_url, **kwargs):
        """BuilderConfig for CrimeAndPunish.

        Args:
          data_url: `string`, url to the raw text of the dataset.
          **kwargs: keyword arguments forwarded to super
            (e.g. `name`, `description`).
        """
        # Fixed copy-pasted docstring (it previously said "BlogAuthorship").
        # Pin the config version; everything else is caller-supplied.
        super().__init__(
            version=datasets.Version("1.0.0"),
            **kwargs,
        )
        self.data_url = data_url


class CrimeAndPunish(datasets.GeneratorBasedBuilder):
    """Line-by-line text of Dostoevsky's "Crime and Punishment" (Gutenberg)."""

    VERSION = datasets.Version("0.1.0")
    BUILDER_CONFIGS = [
        CrimeAndPunishConfig(
            name="crime-and-punish",
            data_url=_DATA_URL,
            description="word level dataset. No processing is needed other than replacing newlines with <eos> tokens.",
        ),
    ]

    def _info(self):
        """Dataset metadata: a single string feature holding one line of text."""
        features = datasets.Features({"line": datasets.Value("string")})
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            features=features,
            # This corpus has no canonical (input, target) pairing.
            supervised_keys=None,
            homepage=_URL,
        )

    def _split_generators(self, dl_manager):
        """Download the raw text file and expose it as a single TRAIN split."""
        if self.config.name != "crime-and-punish":
            raise ValueError(f"{self.config.name} does not exist")

        data_path = dl_manager.download_and_extract(self.config.data_url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_file": data_path, "split": "train"},
            ),
        ]

    def _generate_examples(self, data_file, split):
        """Yield `(id, {"line": ...})` pairs for the body of the book.

        Emission begins at the third occurrence of the all-caps title
        (the earlier occurrences belong to front matter) and stops at the
        "End of Project" Gutenberg trailer marker. Ids are 1-based.
        """
        example_id = 0
        emitting = False
        title_occurrences = 0

        # Read as bytes and decode per line, matching the source encoding.
        with open(data_file, "rb") as handle:
            for raw_line in handle:
                text = raw_line.decode("UTF-8")

                if "CRIME AND PUNISHMENT" in text:
                    title_occurrences += 1
                    # Re-evaluated on every occurrence: only the third one
                    # switches emission on (a later one would switch it off).
                    emitting = title_occurrences == 3
                if "End of Project" in text:
                    emitting = False

                if emitting:
                    example_id += 1
                    yield example_id, {"line": text}