Matīss committed
Commit 78903f7
Parent: a43a7d3

Upload liv4ever.py

Files changed (1):
  liv4ever.py +49 -39
liv4ever.py CHANGED
@@ -66,10 +66,11 @@ _LICENSE = "CC BY-NC-SA 4.0"
 # TODO: Add link to the official dataset URLs here
 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+_URL = "https://huggingface.co/datasets/tartuNLP/liv4ever/raw/main/"
 _URLS = {
-    "train": "https://huggingface.co/datasets/tartuNLP/liv4ever/raw/main/train.json",
-    "dev": "https://huggingface.co/datasets/tartuNLP/liv4ever/raw/main/dev.json",
-    "test": "https://huggingface.co/datasets/tartuNLP/liv4ever/raw/main/test.json",
+    "train": _URL + "train.json",
+    "dev": _URL + "dev.json",
+    "test": _URL + "test.json",
 }

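Factoring the repository root into `_URL` keeps the three split URLs in sync if the hosting path ever changes. To sanity-check that the composed URLs still resolve, a throwaway standard-library snippet (not part of the script; the URLs are just the ones composed above) could look like:

    import urllib.request

    _URL = "https://huggingface.co/datasets/tartuNLP/liv4ever/raw/main/"

    for split in ("train", "dev", "test"):
        # Expect HTTP 200 for each raw JSON file
        with urllib.request.urlopen(_URL + split + ".json") as response:
            print(split, response.status)
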
@@ -90,12 +91,12 @@ class liv4ever(datasets.GeneratorBasedBuilder):
     # You will be able to load one or the other configurations in the following list with
     # data = datasets.load_dataset('my_dataset', 'train')
     # data = datasets.load_dataset('my_dataset', 'dev')
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="train", version=VERSION, description="This part of my dataset covers a first domain"),
-        datasets.BuilderConfig(name="dev", version=VERSION, description="This part of my dataset covers a second domain"),
-    ]
+    # BUILDER_CONFIGS = [
+    #     datasets.BuilderConfig(name="train", version=VERSION, description="This part of my dataset covers a first domain"),
+    #     datasets.BuilderConfig(name="dev", version=VERSION, description="This part of my dataset covers a second domain"),
+    # ]

-    DEFAULT_CONFIG_NAME = "train"  # It's not mandatory to have a default configuration. Just use one if it make sense.
+    # DEFAULT_CONFIG_NAME = "train"  # It's not mandatory to have a default configuration. Just use one if it make sense.

     def _info(self):
         # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
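
With `BUILDER_CONFIGS` and `DEFAULT_CONFIG_NAME` commented out, the builder falls back to a single default configuration, so callers select a split with the `split` argument rather than a config name. Assuming the script is loaded from the tartuNLP/liv4ever repository the URLs point at, usage would look like:

    from datasets import load_dataset

    # No config name needed any more; pick the split directly
    train = load_dataset("tartuNLP/liv4ever", split="train")
    dev = load_dataset("tartuNLP/liv4ever", split="validation")
    print(train[0])  # expected fields: "source", "en", "liv"
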
@@ -130,33 +131,41 @@ class liv4ever(datasets.GeneratorBasedBuilder):
         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-        urls = _URLS[self.config.name]
-        data_dir = dl_manager.download_and_extract(urls)
+        # urls = _URLS[self.config.name]
+        # data_dir = dl_manager.download_and_extract(urls)
+        # return [
+        #     datasets.SplitGenerator(
+        #         name=datasets.Split.TRAIN,
+        #         # These kwargs will be passed to _generate_examples
+        #         gen_kwargs={
+        #             "filepath": os.path.join(data_dir),
+        #             "split": "train",
+        #         },
+        #     ),
+        #     datasets.SplitGenerator(
+        #         name=datasets.Split.TEST,
+        #         # These kwargs will be passed to _generate_examples
+        #         gen_kwargs={
+        #             "filepath": os.path.join(data_dir),
+        #             "split": "test"
+        #         },
+        #     ),
+        #     datasets.SplitGenerator(
+        #         name=datasets.Split.VALIDATION,
+        #         # These kwargs will be passed to _generate_examples
+        #         gen_kwargs={
+        #             "filepath": os.path.join(data_dir),
+        #             "split": "dev",
+        #         },
+        #     ),
+        # ]
+        urls_to_download = self._URLS
+        downloaded_files = dl_manager.download_and_extract(urls_to_download)
+
         return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir),
-                    "split": "train",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir),
-                    "split": "test"
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir),
-                    "split": "dev",
-                },
-            ),
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
+            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
         ]

     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
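
One detail worth flagging: hunk 1 defines `_URLS` at module level, yet the new code reads `self._URLS`, which only resolves if `_URLS` is also defined as a class attribute (examples of this pattern elsewhere do define it on the class). As the file stands, that lookup would raise AttributeError when the builder runs; referencing the module-level constant directly avoids the problem. A minimal sketch of that variant, otherwise identical in behaviour:

    def _split_generators(self, dl_manager):
        # download_and_extract mirrors the structure of _URLS, returning
        # {"train": <local path>, "dev": <local path>, "test": <local path>}
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]
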
@@ -164,11 +173,12 @@
         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
         with open(filepath, encoding="utf-8") as f:
-            for key, row in enumerate(f):
-                data = json.loads(row)
+            jsondata = json.load(f)
+
+            for sentence in jsondata:
                 # Yields examples as (key, example) tuples
                 yield key, {
-                    "source": data["source"],
-                    "en": data["en"],
-                    "liv": data["liv"],
+                    "source": sentence["source"],
+                    "en": sentence["en"],
+                    "liv": sentence["liv"],
                 }
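
The intent of the parsing change is sound: the old loop treated each line as its own JSON document (`json.loads` per row), while `json.load` plus iteration suggests each file is a single JSON array. But as committed the loop no longer binds `key`: `for key, row in enumerate(f)` became `for sentence in jsondata`, while the body still yields `key`, which raises NameError on the first example. Re-adding `enumerate` restores the unique key the comment above calls for. Note also that the new `gen_kwargs` pass only `filepath`, so the `_generate_examples` signature (just outside this hunk) must accept exactly that. A minimal fix, assuming each record carries `source`, `en`, and `liv` fields as the diff indicates:

    def _generate_examples(self, filepath):
        with open(filepath, encoding="utf-8") as f:
            jsondata = json.load(f)

        # enumerate restores the unique integer key that the old
        # line-by-line loop got from enumerate(f)
        for key, sentence in enumerate(jsondata):
            yield key, {
                "source": sentence["source"],
                "en": sentence["en"],
                "liv": sentence["liv"],
            }
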
 