Rename wikitext.py to text_for_tis.py
text_for_tis.py  +122 -0
wikitext.py      +0 -171
text_for_tis.py
ADDED
@@ -0,0 +1,122 @@
+"""TIS Daten aus Hamburg"""
+
+
+import datasets
+
+
+# Find for instance the citation on arxiv or on the dataset repo/website
+_CITATION = """\
+@article{lif-15,
+title = "LIF 15 LI Hamburg",
+journal = "Data",
+volume = "2",
+number = "2",
+year = "2023",
+url = "https://li-hamburg.de",
+pages = "313--330",
+}
+"""
+
+# TODO: Add description of the dataset here
+# You can copy an official description
+_DESCRIPTION = """\
+Daten von LIF 15 zum TIS System für Fortbildungen
+"""
+
+_HOMEPAGE = "https://li-hamburg.de"
+
+# TODO: Add the licence for the dataset here if you can find it
+_LICENSE = "LDC User Agreement for Non-Members"
+
+# TODO: Add link to the official dataset URLs here
+# The HuggingFace datasets library doesn't host the datasets but only points to the original files
+# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+_URL = "alexkueck/tis"
+_TRAINING_FILE = "tis.train.txt"
+#_DEV_FILE = "tis.valid.txt"
+_TEST_FILE = "tis.test.txt"
+
+
+class TISConfig(datasets.BuilderConfig):
+    """BuilderConfig for TIS."""
+
+    def __init__(self, **kwargs):
+        """BuilderConfig for TIS.
+        Args:
+            **kwargs: keyword arguments forwarded to super.
+        """
+        super(TISConfig, self).__init__(**kwargs)
+
+
+class TIS(datasets.GeneratorBasedBuilder):
+    """Load the TIS dataset."""
+
+    VERSION = datasets.Version("1.1.0")
+
+    # This is an example of a dataset with multiple configurations.
+    # If you don't want/need to define several sub-sets in your dataset,
+    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
+
+    # If you need to make complex sub-parts in the datasets with configurable options
+    # you can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig
+    # BUILDER_CONFIG_CLASS = MyBuilderConfig
+
+    # You will be able to load one or the other configuration in the following list with
+    # data = datasets.load_dataset('my_dataset', 'first_domain')
+    # data = datasets.load_dataset('my_dataset', 'second_domain')
+    BUILDER_CONFIGS = [
+        TISConfig(
+            name="tis",
+            version=VERSION,
+            description="Load TIS dataset",
+        ),
+    ]
+
+    def _info(self):
+        features = datasets.Features({"sentence": datasets.Value("string")})
+        return datasets.DatasetInfo(
+            # This is the description that will appear on the datasets page.
+            description=_DESCRIPTION,
+            # This defines the different columns of the dataset and their types
+            features=features,
+            # If there's a common (input, target) tuple from the features,
+            # specify them here. They'll be used if as_supervised=True in
+            # builder.as_dataset.
+            supervised_keys=None,
+            # Homepage of the dataset for documentation
+            homepage=_HOMEPAGE,
+            # License for the dataset if available
+            license=_LICENSE,
+            # Citation for the dataset
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        """Returns SplitGenerators."""
+        # This method downloads/extracts the data and defines the splits depending on the configuration
+        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
+
+        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
+        # It can accept any type or nested list/dict and will give back the same structure with the URLs replaced with paths to local files.
+        # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
+        my_urls = {
+            "train": f"{_URL}{_TRAINING_FILE}",
+            #"dev": f"{_URL}{_DEV_FILE}",
+            "test": f"{_URL}{_TEST_FILE}",
+        }
+        data_dir = dl_manager.download_and_extract(my_urls)
+        return [
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir["train"]}),
+            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_dir["test"]}),
+            #datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_dir["dev"]}),
+        ]
+
+    def _generate_examples(self, filepath):
+        """Yields examples."""
+        # This method receives as arguments the `gen_kwargs` defined in the previous `_split_generators` method.
+        # It is in charge of opening the given file and yielding (key, example) tuples from the dataset
+        # The key is not important, it's more here for legacy reasons (legacy from tfds)
+        with open(filepath, encoding="utf-8") as f:
+            for id_, line in enumerate(f):
+                line = line.strip()
+                yield id_, {"sentence": line}
wikitext.py
DELETED
@@ -1,171 +0,0 @@
-"""TIS Daten aus Hamburg"""
-
-
-import os
-
-import datasets
-
-
-_CITATION = """\
-@misc{merity2016pointer,
-      title={TIS},
-      author={LIF15 HH},
-      year={2023},
-      eprint={1609.07843},
-      archivePrefix={arXiv},
-      primaryClass={cs.CL}
-}
-"""
-
-_DESCRIPTION = """\
-Informationen zum Umgang mit TIs
-"""
-_HOMEPAGE = "https://li.hamburg.de/"
-_LICENSE = "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)"
-_DATA_URL = "https://li.hamburg.de/"
-
-
-class TISConfig(datasets.BuilderConfig):
-    """BuilderConfig for GLUE."""
-
-    def __init__(self, title, **kwargs):
-        """BuilderConfig for TIS
-
-        Args:
-            title: `string`, title for follwoing content
-            **kwargs: keyword arguments forwarded to super.
-        """
-        super(TISConfig, self).__init__(
-            version=datasets.Version(
-                "1.0.0",
-            ),
-            **kwargs,
-        )
-        self.title = title
-
-
-class TIS(datasets.GeneratorBasedBuilder):
-
-    VERSION = datasets.Version("0.1.0")
-    BUILDER_CONFIGS = [
-        TISConfig(
-            name="li-tis-01",
-            title=title,
-            description="Word level dataset. No processing is needed other than replacing newlines with <eos> tokens.",
-        ),
-    ]
-
-    def _info(self):
-        # TODO(wikitext): Specifies the datasets.DatasetInfo object
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # datasets.features.FeatureConnectors
-            features=datasets.Features(
-                {
-                    "text": datasets.Value("string")
-                    # These are the features of your dataset like images, labels ...
-                }
-            ),
-            # If there's a common (input, target) tuple from the features,
-            # specify them here. They'll be used if as_supervised=True in
-            # builder.as_dataset.
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        # TODO(wikitext): Downloads the data and defines the splits
-        # dl_manager is a datasets.download.DownloadManager that can be used to
-        # download and extract URLs
-        if self.config.name == "wikitext-103-v1":
-            data_file = dl_manager.download_and_extract(self.config.data_url)
-            data_dir = os.path.join(data_file, "wikitext-103")
-            return [
-                datasets.SplitGenerator(
-                    name=datasets.Split.TEST,
-                    gen_kwargs={"data_file": os.path.join(data_dir, "wiki.test.tokens"), "split": "test"},
-                ),
-                datasets.SplitGenerator(
-                    name=datasets.Split.TRAIN,
-                    gen_kwargs={"data_file": os.path.join(data_dir, "wiki.train.tokens"), "split": "train"},
-                ),
-                datasets.SplitGenerator(
-                    name=datasets.Split.VALIDATION,
-                    gen_kwargs={"data_file": os.path.join(data_dir, "wiki.valid.tokens"), "split": "valid"},
-                ),
-            ]
-        else:
-            if self.config.name == "wikitext-103-raw-v1":
-                data_file = dl_manager.download_and_extract(self.config.data_url)
-                data_dir = os.path.join(data_file, "wikitext-103-raw")
-                return [
-                    datasets.SplitGenerator(
-                        name=datasets.Split.TEST,
-                        gen_kwargs={"data_file": os.path.join(data_dir, "wiki.test.raw"), "split": "test"},
-                    ),
-                    datasets.SplitGenerator(
-                        name=datasets.Split.TRAIN,
-                        gen_kwargs={"data_file": os.path.join(data_dir, "wiki.train.raw"), "split": "train"},
-                    ),
-                    datasets.SplitGenerator(
-                        name=datasets.Split.VALIDATION,
-                        gen_kwargs={"data_file": os.path.join(data_dir, "wiki.valid.raw"), "split": "valid"},
-                    ),
-                ]
-            else:
-                if self.config.name == "wikitext-2-raw-v1":
-                    data_file = dl_manager.download_and_extract(self.config.data_url)
-                    data_dir = os.path.join(data_file, "wikitext-2-raw")
-                    return [
-                        datasets.SplitGenerator(
-                            name=datasets.Split.TEST,
-                            gen_kwargs={"data_file": os.path.join(data_dir, "wiki.test.raw"), "split": "test"},
-                        ),
-                        datasets.SplitGenerator(
-                            name=datasets.Split.TRAIN,
-                            gen_kwargs={"data_file": os.path.join(data_dir, "wiki.train.raw"), "split": "train"},
-                        ),
-                        datasets.SplitGenerator(
-                            name=datasets.Split.VALIDATION,
-                            gen_kwargs={"data_file": os.path.join(data_dir, "wiki.valid.raw"), "split": "valid"},
-                        ),
-                    ]
-                else:
-                    if self.config.name == "wikitext-2-v1":
-                        data_file = dl_manager.download_and_extract(self.config.data_url)
-                        data_dir = os.path.join(data_file, "wikitext-2")
-                        return [
-                            datasets.SplitGenerator(
-                                name=datasets.Split.TEST,
-                                gen_kwargs={"data_file": os.path.join(data_dir, "wiki.test.tokens"), "split": "test"},
-                            ),
-                            datasets.SplitGenerator(
-                                name=datasets.Split.TRAIN,
-                                gen_kwargs={
-                                    "data_file": os.path.join(data_dir, "wiki.train.tokens"),
-                                    "split": "train",
-                                },
-                            ),
-                            datasets.SplitGenerator(
-                                name=datasets.Split.VALIDATION,
-                                gen_kwargs={
-                                    "data_file": os.path.join(data_dir, "wiki.valid.tokens"),
-                                    "split": "valid",
-                                },
-                            ),
-                        ]
-
-    def _generate_examples(self, data_file, split):
-
-        """Yields examples."""
-        # TODO(wikitext): Yields (key, example) tuples from the dataset
-        with open(data_file, encoding="utf-8") as f:
-            for idx, row in enumerate(f):
-                if row.strip():
-                    yield idx, {"text": row}
-                else:
-                    yield idx, {"text": ""}
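
A script-based dataset like the new text_for_tis.py is normally consumed through datasets.load_dataset. The following is only a minimal usage sketch, assuming the script and the tis.train.txt / tis.test.txt files are hosted in the alexkueck/tis dataset repository on the Hub (the repo id used in _URL) and using the "tis" config declared in BUILDER_CONFIGS:

from datasets import load_dataset

# Assumption: the loading script lives in the "alexkueck/tis" dataset repo on the Hub.
# Recent versions of the datasets library may require trust_remote_code=True for script-based datasets.
data = load_dataset("alexkueck/tis", "tis", trust_remote_code=True)

# Each example carries a single "sentence" field holding one stripped line of the source file.
print(data["train"][0]["sentence"])
print(data["test"].num_rows)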