Datasets: rcds / wikipedia-for-mask-filling
Modalities: Tabular, Text
Languages: English
Libraries: Datasets
License: unknown
Skatinger committed
Commit 88c5ff8
1 parent: 95a9f72

Upload 5 files

.gitattributes CHANGED
@@ -52,3 +52,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.jpg filter=lfs diff=lfs merge=lfs -text
  *.jpeg filter=lfs diff=lfs merge=lfs -text
  *.webp filter=lfs diff=lfs merge=lfs -text
+ data/original_4096.jsonl filter=lfs diff=lfs merge=lfs -text
+ data/original_512.jsonl filter=lfs diff=lfs merge=lfs -text
+ data/paraphrased_4096.jsonl filter=lfs diff=lfs merge=lfs -text
+ data/paraphrased_512.jsonl filter=lfs diff=lfs merge=lfs -text
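
These four new rules extend the existing media-type patterns: any path matching a `filter=lfs` rule is stored in git as a small pointer file instead of the raw payload. As a quick sanity check, a script along these lines can report which of the uploaded files fall under an LFS rule (a minimal sketch: `fnmatch` only approximates Git's wildmatch pattern semantics, and the file list is copied from this commit):

# Sketch: report which committed paths are covered by an LFS rule
# in .gitattributes. fnmatch is an approximation of Git's matching.
from fnmatch import fnmatch

def lfs_patterns(gitattributes_path=".gitattributes"):
    """Yield patterns whose attributes mark them as LFS-tracked."""
    with open(gitattributes_path, encoding="utf-8") as fh:
        for line in fh:
            parts = line.split()
            if len(parts) >= 2 and "filter=lfs" in parts[1:]:
                yield parts[0]

files = [
    "data/original_4096.jsonl",
    "data/original_512.jsonl",
    "data/paraphrased_4096.jsonl",
    "data/paraphrased_512.jsonl",
]
patterns = list(lfs_patterns())
for path in files:
    tracked = any(fnmatch(path, pat) for pat in patterns)
    print(f"{path}: {'LFS' if tracked else 'regular git'}")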
data/original_4096.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5ecdbe45c982c8c9d3292df2f3e38e73f7c59f76802b3cdb4021183caebcf9f9
+ size 811127234
data/original_512.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5485019889c5bbcaf38a571bf1680d81a2bb03376b5689d0223dc578d3eb4c4e
+ size 829447436
data/paraphrased_4096.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bf17100dcc1e4d1daaa3edd157a3c1e3b828594d64a9253f9007e8523234e721
+ size 558188350
data/paraphrased_512.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5485019889c5bbcaf38a571bf1680d81a2bb03376b5689d0223dc578d3eb4c4e
+ size 829447436
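
Each of the four ADDED files above is stored as a Git LFS pointer rather than as the JSONL payload itself: three lines giving the pointer spec version, the SHA-256 of the real content, and its size in bytes. A pointer can be parsed and checked against a locally fetched copy of the payload roughly like this (a sketch; parse_pointer and verify are illustrative helpers, and the example paths are hypothetical):

# Sketch: parse a Git LFS pointer file and verify a local payload copy.
import hashlib

def parse_pointer(pointer_path):
    """Return the version, oid, and size fields of an LFS pointer file."""
    fields = {}
    with open(pointer_path, encoding="utf-8") as fh:
        for line in fh:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return {
        "version": fields["version"],
        "oid": fields["oid"].removeprefix("sha256:"),
        "size": int(fields["size"]),
    }

def verify(payload_path, pointer):
    """Check size and SHA-256 of a downloaded payload against the pointer."""
    sha = hashlib.sha256()
    size = 0
    with open(payload_path, "rb") as fh:
        for chunk in iter(lambda: fh.read(1 << 20), b""):
            sha.update(chunk)
            size += len(chunk)
    return size == pointer["size"] and sha.hexdigest() == pointer["oid"]

# Hypothetical usage:
# pointer = parse_pointer("data/original_4096.jsonl")  # pointer as stored in git
# print(verify("/downloads/original_4096.jsonl", pointer))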
wikipedia-for-mask-filling.py ADDED
@@ -0,0 +1,128 @@
+ # dataset loading script for huggingface
+ import json
+
+ import datasets
+
+ try:
+     import lzma as xz
+ except ImportError:
+     import pylzma as xz
+
+ datasets.logging.set_verbosity_info()
+ logger = datasets.logging.get_logger(__name__)
+
+ _DESCRIPTION = """\
+ """
+
+ _HOMEPAGE = "https://skatinger.github.io/master_thesis/"
+
+ _LICENSE = ""
+
+ _CITATION = ""
+
+ _TYPES = ["original", "paraphrased"]
+
+ _SIZES = [4096, 512]
+
+ _URLS = {
+     "original_4096": "data/original_4096.jsonl.xz",
+     "original_512": "data/original_512.jsonl.xz",
+     "paraphrased_4096": "data/paraphrased_4096.jsonl.xz",
+     "paraphrased_512": "data/paraphrased_512.jsonl.xz",
+ }
+
+
+ class WikipediaForMaskFillingConfig(datasets.BuilderConfig):
+     """BuilderConfig for WikipediaForMaskFilling.
+
+     Args:
+         type: which variant of the texts to use, "original" or "paraphrased".
+         size: maximum number of tokens per text chunk, 4096 or 512.
+         **kwargs: keyword arguments forwarded to super.
+     """
+
+     def __init__(self, type, size=4096, **kwargs):
+         # Version history:
+         # 1.0.0: first version
+         super().__init__(**kwargs)
+         self.size = size
+         self.type = type
+
+
+ class WikipediaForMaskFilling(datasets.GeneratorBasedBuilder):
+     """WikipediaForMaskFilling dataset."""
+
+     BUILDER_CONFIGS = [
+         WikipediaForMaskFillingConfig(
+             name="original_4096",
+             version=datasets.Version("1.0.0"),
+             description="Part of the dataset with original texts and masks, with text chunks split into at most 4096 tokens (Longformer).",
+             size=4096,
+             type="original",
+         ),
+         WikipediaForMaskFillingConfig(
+             name="original_512",
+             version=datasets.Version("1.0.0"),
+             description="Original text chunks split into at most 512 tokens (RoBERTa).",
+             size=512,
+             type="original",
+         ),
+         WikipediaForMaskFillingConfig(
+             name="paraphrased_4096",
+             version=datasets.Version("1.0.0"),
+             description="Part of the dataset with paraphrased texts and masks, with text chunks split into at most 4096 tokens (Longformer).",
+             size=4096,
+             type="paraphrased",
+         ),
+         WikipediaForMaskFillingConfig(
+             name="paraphrased_512",
+             version=datasets.Version("1.0.0"),
+             description="Paraphrased text chunks split into at most 512 tokens (RoBERTa).",
+             size=512,
+             type="paraphrased",
+         ),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "texts": datasets.Value("string"),
+                     "masks": datasets.Sequence(datasets.Value("string")),
+                 }
+             ),
+             # No default supervised_keys: texts and masks are both inputs.
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         type = self.config.type
+         size = self.config.size
+         url = _URLS[f"{type}_{size}"]
+         # download() returns a single local path for a single URL and keeps
+         # the .xz archive compressed so _generate_examples can stream it.
+         downloaded_file = dl_manager.download(url)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"filepath": downloaded_file},
+             ),
+         ]
+
+     def _generate_examples(self, filepath):
+         _id = 0
+         try:
+             # Open the xz-compressed JSON-lines file once, in text mode.
+             with xz.open(filepath, "rt", encoding="utf-8") as f:
+                 for line in f:
+                     data = json.loads(line)
+                     yield _id, {
+                         "texts": data["texts"],
+                         "masks": data["masks"],
+                     }
+                     _id += 1
+         except Exception:
+             logger.exception("Error while processing file %s", filepath)
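
With the loading script in place, each configuration can be pulled by name through the datasets library. A usage sketch (the repo id rcds/wikipedia-for-mask-filling is inferred from the page header and the script name, not confirmed by this commit):

# Sketch: load one configuration of the dataset; adjust the repo id if needed.
from datasets import load_dataset

ds = load_dataset("rcds/wikipedia-for-mask-filling", "original_512", split="train")
example = ds[0]
print(example["texts"][:200])  # a text chunk of at most 512 tokens
print(example["masks"][:5])    # masked spans belonging to this chunk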