mariosasko committed
Commit 0343951
1 Parent(s): d8a75a0

Upload wikipedia.py with huggingface_hub

Files changed (1)
  1. wikipedia.py +116 -88
wikipedia.py CHANGED
@@ -20,11 +20,15 @@
 import bz2
 import codecs
 import json
+import os
 import re
 import warnings
 import xml.etree.cElementTree as etree
 from urllib.parse import quote

+import pyarrow as pa
+import pyarrow.parquet as pq
+
 import datasets


@@ -905,7 +909,7 @@ class WikipediaConfig(datasets.BuilderConfig):
 _DATE = "20220301"


-class Wikipedia(datasets.BeamBasedBuilder):
+class Wikipedia(datasets.ArrowBasedBuilder):
     """Wikipedia dataset."""

     # Use mirror (your.org) to avoid download caps.
@@ -917,6 +921,8 @@ class Wikipedia(datasets.BeamBasedBuilder):
         )  # pylint:disable=g-complex-comprehension
         for lang in WIKIPEDIA_LANGUAGES
     ]
+
+    BATCH_SIZE = 1000

     def _info(self):
         warnings.warn(
@@ -941,112 +947,115 @@ class Wikipedia(datasets.BeamBasedBuilder):
             supervised_keys=None,
             homepage="https://dumps.wikimedia.org",
             citation=_CITATION,
+            license=_LICENSE,
         )

-    def _split_generators(self, dl_manager, pipeline):
-        def _base_url(lang):
-            return _BASE_URL_TMPL.format(lang=lang.replace("-", "_"), date=self.config.date)
-
-        lang = self.config.language
-
-        info_url = _base_url(lang) + _INFO_FILE
-        # Use dictionary since testing mock always returns the same result.
-        downloaded_files = dl_manager.download_and_extract({"info": info_url})
-
-        xml_urls = []
-        total_bytes = 0
-        with open(downloaded_files["info"], encoding="utf-8") as f:
-            dump_info = json.load(f)
-        multistream_dump_info = dump_info["jobs"]["articlesmultistreamdump"]
-        assert (
-            multistream_dump_info["status"] == "done"
-        ), "Specified dump (%s) multistream status is not 'done': %s" % (
-            _base_url(lang),
-            multistream_dump_info["status"],
-        )
+    def _split_generators(self, dl_manager):
+        processed_data_dir = os.path.join("data", self.config.name)
+        is_processed = os.path.exists(processed_data_dir)
+        if is_processed:
+            parquet_urls = sorted(os.path.join(processed_data_dir, parquet_file) for parquet_file in os.listdir(processed_data_dir))
+            # Use dictionary since testing mock always returns the same result.
+            downloaded_files = dl_manager.download({"parquet": parquet_urls})
+            files = downloaded_files["parquet"]
+        else:
+            def _base_url(lang):
+                return _BASE_URL_TMPL.format(lang=lang.replace("-", "_"), date=self.config.date)

-        for fname, info in multistream_dump_info["files"].items():
-            if ".xml" not in fname:
-                continue
-            total_bytes += info["size"]
-            xml_urls.append(_base_url(lang) + fname)
+            lang = self.config.language
+
+            info_url = _base_url(lang) + _INFO_FILE
+            # Use dictionary since testing mock always returns the same result.
+            downloaded_files = dl_manager.download_and_extract({"info": info_url})
+
+            xml_urls = []
+            total_bytes = 0
+            with open(downloaded_files["info"], encoding="utf-8") as f:
+                dump_info = json.load(f)
+            multistream_dump_info = dump_info["jobs"]["articlesmultistreamdump"]
+            assert (
+                multistream_dump_info["status"] == "done"
+            ), "Specified dump (%s) multistream status is not 'done': %s" % (
+                _base_url(lang),
+                multistream_dump_info["status"],
+            )
+
+            for fname, info in multistream_dump_info["files"].items():
+                if ".xml" not in fname:
+                    continue
+                total_bytes += info["size"]
+                xml_urls.append(_base_url(lang) + fname)

         # Use dictionary since testing mock always returns the same result.
-        downloaded_files = dl_manager.download({"xml": xml_urls})
-        if not pipeline.is_local():
-            downloaded_files = dl_manager.ship_files_with_pipeline(downloaded_files, pipeline)
+            downloaded_files = dl_manager.download({"xml": xml_urls})
+            files = downloaded_files["xml"]

         return [
             datasets.SplitGenerator(  # pylint:disable=g-complex-comprehension
-                name=datasets.Split.TRAIN, gen_kwargs={"filepaths": downloaded_files["xml"], "language": lang}
+                name=datasets.Split.TRAIN, gen_kwargs={"files": files, "is_processed": is_processed}
             )
         ]

-    def _build_pcollection(self, pipeline, filepaths, language):
-        """Build PCollection of examples in the raw (text) form."""
-        import apache_beam as beam
-        import mwparserfromhell
-
-        def _extract_content(filepath):
-            """Extracts article content from a single WikiMedia XML file."""
-            logger.info("generating examples from = %s", filepath)
-            with beam.io.filesystems.FileSystems.open(filepath) as f:
-                f = bz2.BZ2File(filename=f)
-                # Workaround due to: https://github.com/tensorflow/tensorflow/issues/33563
-                utf_f = codecs.getreader("utf-8")(f)
-                context = etree.iterparse(utf_f, events=("end",))
-                for unused_event, elem in context:
-                    if not elem.tag.endswith("page"):
+    def _generate_tables(self, files, is_processed):
+        if is_processed:
+            batch_idx = 0
+            for file in files:
+                with open(file, "rb") as f:
+                    f = pq.ParquetFile(f)
+                    for batch in f.iter_batches(batch_size=self.BATCH_SIZE):
+                        yield batch_idx, pa.Table.from_batches([batch])
+                        batch_idx += 1
+        else:
+            batch = []
+            batch_idx = 0
+            for file in files:
+                logger.info("generating examples from = %s", file)
+                for content in _extract_content(file):
+                    example = _clean_content(content, self.config.language)
+                    if example is None:
                         continue
-                    namespace = elem.tag[:-4]
-                    title = elem.find(f"./{namespace}title").text
-                    ns = elem.find(f"./{namespace}ns").text
-                    id_ = elem.find(f"./{namespace}id").text
-                    red_ = elem.find(f"./{namespace}redirect")
-
-                    # Filter pages that are not in the "main" namespace.
-                    if ns != "0":
-                        elem.clear()
-                        continue
-
-                    raw_content = elem.find(f"./{namespace}revision/{namespace}text").text
-                    elem.clear()
+                    batch.append(example)
+                    if len(batch) >= self.BATCH_SIZE:
+                        pa_table = pa.Table.from_pylist(batch)
+                        yield batch_idx, pa_table
+                        batch = []
+                        batch_idx += 1

-                    # Filter redirects.
-                    if raw_content is None or red_ is not None:
-                        beam.metrics.Metrics.counter(language, "filtered-redirects").inc()
-                        continue
+            if batch:
+                pa_table = pa.Table.from_pylist(batch)
+                yield batch_idx, pa_table

-                    beam.metrics.Metrics.counter(language, "extracted-examples").inc()
-                    yield (id_, title, raw_content)

-        def _clean_content(inputs, language):
-            """Cleans raw wikicode to extract text."""
-            id_, title, raw_content = inputs
-            try:
-                text = _parse_and_clean_wikicode(raw_content, parser=mwparserfromhell, language=language)
-            except (mwparserfromhell.parser.ParserError) as e:
-                beam.metrics.Metrics.counter(language, "parser-error").inc()
-                logger.error("mwparserfromhell ParseError: %s", e)
-                return
-
-            if not text:
-                beam.metrics.Metrics.counter(language, "empty-clean-examples").inc()
-                return
+def _extract_content(file):
+    """Extracts article content from a single WikiMedia XML file."""
+    print("Extracting content from", file)
+    with open(file, "rb") as f:
+        f = bz2.BZ2File(filename=f)
+        # Workaround due to: https://github.com/tensorflow/tensorflow/issues/33563
+        utf_f = codecs.getreader("utf-8")(f)
+        context = etree.iterparse(utf_f, events=("end",))
+        for unused_event, elem in context:
+            if not elem.tag.endswith("page"):
+                continue
+            namespace = elem.tag[:-4]
+            title = elem.find(f"./{namespace}title").text
+            ns = elem.find(f"./{namespace}ns").text
+            id_ = elem.find(f"./{namespace}id").text
+            red_ = elem.find(f"./{namespace}redirect")

-            url = _construct_url(title, language)
+            # Filter pages that are not in the "main" namespace.
+            if ns != "0":
+                elem.clear()
+                continue

-            beam.metrics.Metrics.counter(language, "cleaned-examples").inc()
+            raw_content = elem.find(f"./{namespace}revision/{namespace}text").text
+            elem.clear()

-            yield id_, {"id": id_, "url": url, "title": title, "text": text}
+            # Filter redirects.
+            if raw_content is None or red_ is not None:
+                continue

-        return (
-            pipeline
-            | "Initialize" >> beam.Create(filepaths)
-            | "Extract content" >> beam.FlatMap(_extract_content)
-            | "Distribute" >> beam.transforms.Reshuffle()
-            | "Clean content" >> beam.FlatMap(_clean_content, language=language)
-        )
+            yield id_, title, raw_content


 def _parse_and_clean_wikicode(raw_content, parser, language):
@@ -1111,3 +1120,22 @@ def _parse_and_clean_wikicode(raw_content, parser, language):
 def _construct_url(title, language):
     # See: https://meta.wikimedia.org/wiki/Help:URL
     return f"https://{language}.wikipedia.org/wiki/{quote(title)}"
+
+
+def _clean_content(inputs, language):
+    """Cleans raw wikicode to extract text."""
+    import mwparserfromhell
+
+    id_, title, raw_content = inputs
+    try:
+        text = _parse_and_clean_wikicode(raw_content, parser=mwparserfromhell, language=language)
+    except (mwparserfromhell.parser.ParserError) as e:
+        logger.error("mwparserfromhell ParseError: %s", e)
+        return
+
+    if not text:
+        return
+
+    url = _construct_url(title, language)
+
+    return {"id": id_, "url": url, "title": title, "text": text}
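Note on the fast path added in _generate_tables: when preprocessed Parquet shards exist under data/<config name>, the builder streams record batches with pyarrow.parquet.ParquetFile.iter_batches and wraps each batch in a pyarrow.Table. The sketch below mirrors that logic outside the builder; the file path is a hypothetical example, not taken from the commit.

# Minimal sketch of the Parquet fast path, assuming a local shard such as
# "data/20220301.en/part-0.parquet" (hypothetical path).
import pyarrow as pa
import pyarrow.parquet as pq

BATCH_SIZE = 1000  # same batch size the builder uses

def iter_tables(parquet_path):
    """Yield (batch_idx, pyarrow.Table) pairs from one Parquet file."""
    with open(parquet_path, "rb") as f:
        parquet_file = pq.ParquetFile(f)
        for batch_idx, batch in enumerate(parquet_file.iter_batches(batch_size=BATCH_SIZE)):
            # Each RecordBatch becomes a single-batch Table, as in _generate_tables.
            yield batch_idx, pa.Table.from_batches([batch])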
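The slow path keeps the old XML extraction and wikicode cleaning but, instead of emitting examples through a Beam pipeline, it accumulates cleaned dicts and converts every BATCH_SIZE of them into a table with pyarrow.Table.from_pylist. A standalone sketch, with example_iterator standing in (as an assumption) for the chained _extract_content/_clean_content generators:

import pyarrow as pa

BATCH_SIZE = 1000

def iter_tables_from_examples(example_iterator):
    """Batch example dicts ({"id", "url", "title", "text"}) into pyarrow Tables."""
    batch, batch_idx = [], 0
    for example in example_iterator:
        batch.append(example)
        if len(batch) >= BATCH_SIZE:
            yield batch_idx, pa.Table.from_pylist(batch)
            batch = []
            batch_idx += 1
    if batch:
        # Flush the final partial batch, matching the trailing `if batch:` in the diff.
        yield batch_idx, pa.Table.from_pylist(batch)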
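Because the class now derives from datasets.ArrowBasedBuilder rather than datasets.BeamBasedBuilder, no Apache Beam runner is needed at load time. A minimal usage sketch, assuming the usual <date>.<lang> config naming implied by _DATE = "20220301" and the per-language configs (the "20220301.en" name is an assumption, not stated in this commit):

# Hypothetical usage of the updated script with the `datasets` library.
from datasets import load_dataset

wiki = load_dataset("wikipedia", "20220301.en", split="train")
print(wiki[0]["title"], wiki[0]["url"])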