Datasets: frwiki_good_pages_el
Tasks: Other
Languages: French
Multilinguality: monolingual
Size Categories: unknown
Annotations Creators: machine-generated
Source Datasets: original
DOI:
License:
Gaëtan Caillaut committed
Commit d0f60dc
1 Parent(s): 6453182

update dataset

Files changed (2):
  1. .gitignore +2 -0
  2. frwiki_good_pages_el.py +35 -33
.gitignore ADDED
@@ -0,0 +1,2 @@
+data
+data.tar
frwiki_good_pages_el.py CHANGED
@@ -17,7 +17,8 @@
 
 import pandas as pd
 import re
-
+import gzip
+import json
 import datasets
 from pathlib import Path
 
@@ -60,7 +61,7 @@ _LICENSE = ""
 # The HuggingFace dataset library don't host the datasets but only point to the original files
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 _URLs = {
-    "frwiki": "",
+    "frwiki": "data.tar.gz",
 }
 
 _CLASS_LABELS = [
@@ -219,14 +220,14 @@ class FrWikiGoodPagesELDataset(datasets.GeneratorBasedBuilder):
         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-        # my_urls = _URLs[self.config.name]
-        # data_dir = dl_manager.download_and_extract(my_urls)
+        my_urls = _URLs[self.config.name]
+        data_dir = dl_manager.download_and_extract(my_urls)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "dataset_dir": Path(".", "data", "good-pages"),
+                    "data_dir": Path(data_dir, "data"),
                     "split": "train"
                 }
             )
@@ -234,36 +235,37 @@ class FrWikiGoodPagesELDataset(datasets.GeneratorBasedBuilder):
 
     def _generate_examples(
         # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-        self, dataset_dir, split
+        self, data_dir, split
     ):
         """ Yields examples as (key, example) tuples. """
         # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         # The `key` is here for legacy reason (tfds) and is not important in itself.
 
-        with open(Path(dataset_dir, "list-good-pages.txt"), "rt", encoding="UTF-8") as f:
-            good_pages_list = f.read().split("\n")
-
-        wiki_df = pd.read_csv(Path(dataset_dir, "scrapped", "final-dataset.csv"),
-                              dtype=str, na_filter=False)
-
-        title2qid = dict(zip(wiki_df["title"], wiki_df["qid"]))
-        title2path = dict(zip(wiki_df["title"], wiki_df["path"]))
-        title2wikipedia = dict(
-            zip(wiki_df["title"], wiki_df["wikipedia_description"]))
-        title2wikidata = dict(
-            zip(wiki_df["title"], wiki_df["wikidata_description"]))
-
-        good_pages_list = [
-            gp.strip()
-            for gp in good_pages_list
-            if title2path[gp] != "" and gp.strip() != ""
-        ]
-
-        for id, title in enumerate(good_pages_list):
-            qid = title2qid[title]
-            path = title2path[title]
-            text = read_file(path)
-
-            features = text_to_el_features(
-                qid, title, text, title2qid, title2wikipedia, title2wikidata)
-            yield id, features
+        with open(Path(data_dir, "list-good-pages.txt"), "rt", encoding="UTF-8") as f:
+            good_pages_list = set(f.read().split("\n")).difference("")
+
+        entities_path = Path(data_dir, "entities.jsonl.gz")
+        corpus_path = Path(data_dir, "corpus.jsonl.gz")
+        title2wikipedia = {}
+        title2wikidata = {}
+        title2qid = {}
+        with gzip.open(entities_path, "rt", encoding="UTF-8") as ent_file:
+            for line in ent_file:
+                item = json.loads(line, parse_int=lambda x: x,
+                                  parse_float=lambda x: x, parse_constant=lambda x: x)
+                title = item["title"]
+                title2wikipedia[title] = item["wikipedia_description"]
+                title2wikidata[title] = item["wikidata_description"]
+                title2qid[title] = item["qid"]
+
+        with gzip.open(corpus_path, "rt", encoding="UTF-8") as crps_file:
+            for id, line in enumerate(crps_file):
+                item = json.loads(line, parse_int=lambda x: x,
+                                  parse_float=lambda x: x, parse_constant=lambda x: x)
+                qid = item["qid"]
+                title = item["title"]
+                text = item["text"]
+
+                features = text_to_el_features(
+                    qid, title, text, title2qid, title2wikipedia, title2wikidata)
+                yield id, features
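
After this commit the loader no longer reads the scraped CSV layout; it expects a data.tar.gz archive that extracts to a data/ directory containing list-good-pages.txt, entities.jsonl.gz, and corpus.jsonl.gz. The sketch below is a minimal illustration of that layout, inferred only from the paths and keys used in _split_generators and _generate_examples above; the example values (and the exact annotation format inside "text") are assumptions for illustration.

# Hypothetical sketch of the expected archive layout (values are made up).
import gzip
import json
import tarfile
from pathlib import Path

data_dir = Path("data")
data_dir.mkdir(exist_ok=True)

# One "good page" title per line.
(data_dir / "list-good-pages.txt").write_text("Lion\n", encoding="UTF-8")

# entities.jsonl.gz: one JSON object per entity, with the keys read by the loader.
with gzip.open(data_dir / "entities.jsonl.gz", "wt", encoding="UTF-8") as f:
    f.write(json.dumps({
        "title": "Lion",
        "qid": "Q140",
        "wikipedia_description": "Le lion est une espèce de mammifères carnivores.",
        "wikidata_description": "espèce de mammifères",
    }, ensure_ascii=False) + "\n")

# corpus.jsonl.gz: one JSON object per page; the real "text" field carries the
# entity-link annotations consumed by text_to_el_features (format not shown in this diff).
with gzip.open(data_dir / "corpus.jsonl.gz", "wt", encoding="UTF-8") as f:
    f.write(json.dumps({
        "qid": "Q140",
        "title": "Lion",
        "text": "Le lion (Panthera leo) est un félin.",
    }, ensure_ascii=False) + "\n")

# Pack everything so that extraction yields a top-level "data/" directory,
# matching Path(data_dir, "data") in _split_generators.
with tarfile.open("data.tar.gz", "w:gz") as tar:
    tar.add(data_dir, arcname="data")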
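Since _URLs["frwiki"] now points at data.tar.gz shipped with the repository, the dataset should be loadable directly through the datasets library. A hedged usage sketch follows; the Hub repository id is an assumption based on the author and script name, so substitute the actual path (or a local path to frwiki_good_pages_el.py).

# Assumed repo id; replace with the real Hub path or a local script path.
from datasets import load_dataset

ds = load_dataset("gcaillaut/frwiki_good_pages_el", "frwiki", split="train")
print(ds[0].keys())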