TristanThrush committed
Commit: 5f2fd21 · 1 Parent(s): ec1654a

modified to take advantage of multiple CPUs locally

Files changed (2):
  1. README.md +8 -7
  2. wikipedia.py +58 -45
README.md CHANGED
````diff
@@ -626,6 +626,10 @@ configs:
 
 # Dataset Card for Wikipedia
 
+This repo is a fork of the original Hugging Face Wikipedia repo [here](https://huggingface.co/datasets/wikipedia).
+The difference is that this fork does away with the need for `apache-beam`, and this fork is also very fast if you have a lot of CPUs on your machine.
+It will also use all CPUs available to create a clean Wikipedia pretraining dataset. It takes less than an hour to process all of English wikipedia on a GCP n1-standard-96.
+
 ## Table of Contents
 - [Dataset Description](#dataset-description)
   - [Dataset Summary](#dataset-summary)
@@ -667,10 +671,10 @@ markdown and unwanted sections (references, etc.).
 
 The articles are parsed using the ``mwparserfromhell`` tool.
 
-To load this dataset you need to install Apache Beam and ``mwparserfromhell`` first:
+To load this dataset you need to install ``mwparserfromhell`` first:
 
 ```
-pip install apache_beam mwparserfromhell
+pip install mwparserfromhell
 ```
 
 Then, you can load any subset of Wikipedia per language and per date this way:
@@ -678,11 +682,8 @@ Then, you can load any subset of Wikipedia per language and per date this way:
 ```python
 from datasets import load_dataset
 
-load_dataset("wikipedia", language="sw", date="20220120", beam_runner=...)
+load_dataset("wikipedia", language="sw", date="20220120")
 ```
-where you can pass as `beam_runner` any Apache Beam supported runner for (distributed) data processing
-(see [here](https://beam.apache.org/documentation/runners/capability-matrix/)).
-Pass "DirectRunner" to run it on your machine.
 
 You can find the full list of languages and dates [here](https://dumps.wikimedia.org/backup-index.html).
 
@@ -856,4 +857,4 @@ the text.
 
 ### Contributions
 
-Thanks to [@lewtun](https://github.com/lewtun), [@mariamabarham](https://github.com/mariamabarham), [@thomwolf](https://github.com/thomwolf), [@lhoestq](https://github.com/lhoestq), [@patrickvonplaten](https://github.com/patrickvonplaten) for adding this dataset.
+Thanks to [@lewtun](https://github.com/lewtun), [@mariamabarham](https://github.com/mariamabarham), [@thomwolf](https://github.com/thomwolf), [@lhoestq](https://github.com/lhoestq), [@patrickvonplaten](https://github.com/patrickvonplaten) for adding this dataset.
````
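For reference, a minimal usage sketch of the forked loader described above. The Hub path `username/wikipedia` is a placeholder for this fork's repo id (not given in the diff), and the language/date values mirror the README example:

```python
# Minimal usage sketch; "username/wikipedia" is a placeholder repo id.
from datasets import load_dataset

# Parsing happens locally with mwparserfromhell; no Apache Beam runner is needed.
wiki = load_dataset("username/wikipedia", language="sw", date="20220120", split="train")

# Each example carries the fields produced by the loader: id, url, title, text.
print(wiki[0]["id"], wiki[0]["url"], wiki[0]["title"])
print(wiki[0]["text"][:200])
```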
wikipedia.py CHANGED
```diff
@@ -23,6 +23,9 @@ import json
 import re
 import xml.etree.cElementTree as etree
 from urllib.parse import quote
+import mwparserfromhell
+from multiprocessing import Process, Manager
+from tqdm import tqdm
 
 import datasets
 
@@ -904,7 +907,7 @@ class WikipediaConfig(datasets.BuilderConfig):
 _DATE = "20220301"
 
 
-class Wikipedia(datasets.BeamBasedBuilder):
+class Wikipedia(datasets.GeneratorBasedBuilder):
     """Wikipedia dataset."""
 
     # Use mirror (your.org) to avoid download caps.
@@ -934,7 +937,7 @@ class Wikipedia(datasets.BeamBasedBuilder):
             citation=_CITATION,
         )
 
-    def _split_generators(self, dl_manager, pipeline):
+    def _split_generators(self, dl_manager):
         def _base_url(lang):
             return _BASE_URL_TMPL.format(lang=lang.replace("-", "_"), date=self.config.date)
 
@@ -963,9 +966,10 @@ class Wikipedia(datasets.BeamBasedBuilder):
             xml_urls.append(_base_url(lang) + fname)
 
         # Use dictionary since testing mock always returns the same result.
+
+        print("Downloading Wikipedia dump")
         downloaded_files = dl_manager.download({"xml": xml_urls})
-        if not pipeline.is_local():
-            downloaded_files = dl_manager.ship_files_with_pipeline(downloaded_files, pipeline)
+        print("Finished downloading Wikipedia dump")
 
         return [
             datasets.SplitGenerator(  # pylint:disable=g-complex-comprehension
@@ -973,43 +977,40 @@ class Wikipedia(datasets.BeamBasedBuilder):
             )
         ]
 
-    def _build_pcollection(self, pipeline, filepaths, language):
-        """Build PCollection of examples in the raw (text) form."""
-        import apache_beam as beam
-        import mwparserfromhell
+    def _generate_examples(self, filepaths, language, no_labels=False):
+
 
         def _extract_content(filepath):
             """Extracts article content from a single WikiMedia XML file."""
             logger.info("generating examples from = %s", filepath)
-            with beam.io.filesystems.FileSystems.open(filepath) as f:
-                f = bz2.BZ2File(filename=f)
-                # Workaround due to: https://github.com/tensorflow/tensorflow/issues/33563
-                utf_f = codecs.getreader("utf-8")(f)
-                context = etree.iterparse(utf_f, events=("end",))
-                for unused_event, elem in context:
-                    if not elem.tag.endswith("page"):
-                        continue
-                    namespace = elem.tag[:-4]
-                    title = elem.find(f"./{namespace}title").text
-                    ns = elem.find(f"./{namespace}ns").text
-                    id_ = elem.find(f"./{namespace}id").text
-                    red_ = elem.find(f"./{namespace}redirect")
-
-                    # Filter pages that are not in the "main" namespace.
-                    if ns != "0":
-                        elem.clear()
-                        continue
-
-                    raw_content = elem.find(f"./{namespace}revision/{namespace}text").text
+            content = []
+            f = bz2.BZ2File(filename=filepath)
+            # Workaround due to: https://github.com/tensorflow/tensorflow/issues/33563
+            utf_f = codecs.getreader("utf-8")(f)
+            context = etree.iterparse(utf_f, events=("end",))
+            for unused_event, elem in context:
+                if not elem.tag.endswith("page"):
+                    continue
+                namespace = elem.tag[:-4]
+                title = elem.find(f"./{namespace}title").text
+                ns = elem.find(f"./{namespace}ns").text
+                id_ = elem.find(f"./{namespace}id").text
+                red_ = elem.find(f"./{namespace}redirect")
+
+                # Filter pages that are not in the "main" namespace.
+                if ns != "0":
                     elem.clear()
+                    continue
+
+                raw_content = elem.find(f"./{namespace}revision/{namespace}text").text
+                elem.clear()
 
-                    # Filter redirects.
-                    if raw_content is None or red_ is not None:
-                        beam.metrics.Metrics.counter(language, "filtered-redirects").inc()
-                        continue
+                # Filter redirects.
+                if raw_content is None or red_ is not None:
+                    continue
 
-                    beam.metrics.Metrics.counter(language, "extracted-examples").inc()
-                    yield (id_, title, raw_content)
+                content.append((id_, title, raw_content))
+            return content
 
         def _clean_content(inputs, language):
             """Cleans raw wikicode to extract text."""
@@ -1017,28 +1018,40 @@ class Wikipedia(datasets.BeamBasedBuilder):
             try:
                 text = _parse_and_clean_wikicode(raw_content, parser=mwparserfromhell, language=language)
             except (mwparserfromhell.parser.ParserError) as e:
-                beam.metrics.Metrics.counter(language, "parser-error").inc()
                 logger.error("mwparserfromhell ParseError: %s", e)
                 return
 
             if not text:
-                beam.metrics.Metrics.counter(language, "empty-clean-examples").inc()
                 return
 
             url = _construct_url(title, language)
 
-            beam.metrics.Metrics.counter(language, "cleaned-examples").inc()
+            return id_, {"id": id_, "url": url, "title": title, "text": text}
 
-            yield id_, {"id": id_, "url": url, "title": title, "text": text}
+        print("Parsing and cleaning Wikipedia examples")
+        with Manager() as manager:
+            examples = manager.list()
+            processes = []
+            for filepath in filepaths:
+                def parse_and_clean(examples):
+                    content = _extract_content(filepath)
+                    for obj in tqdm(content):
+                        examples.append(_clean_content(obj, language=language))
+                p = Process(target=parse_and_clean, args=(examples,))
+                p.start()
+                processes.append(p)
 
-        return (
-            pipeline
-            | "Initialize" >> beam.Create(filepaths)
-            | "Extract content" >> beam.FlatMap(_extract_content)
-            | "Distribute" >> beam.transforms.Reshuffle()
-            | "Clean content" >> beam.FlatMap(_clean_content, language=language)
-        )
+            for p in processes:
+                p.join()
+
+            print("Parsed and cleaned Wikipedia examples")
+
+            for example in examples:
+                if example is not None:
+                    yield example
+
 
+
 
 def _parse_and_clean_wikicode(raw_content, parser, language):
     """Strips formatting and unwanted sections from raw page content."""
```