Tristan davidmezzetti committed on
Commit 57e9c34
1 Parent(s): a18072d

Streaming dataset generation (#6)


- Streaming dataset generation (38690e1383258b7bcfca85027fb75c82ab83df9c)


Co-authored-by: David Mezzetti <davidmezzetti@users.noreply.huggingface.co>

Files changed (1)
  1. wikipedia.py +57 -31
wikipedia.py CHANGED
@@ -20,14 +20,15 @@
 import bz2
 import codecs
 import json
+import os
 import re
 import xml.etree.cElementTree as etree
+
+from multiprocessing import Process, Queue
 from urllib.parse import quote
-import mwparserfromhell
-from multiprocess import Process, Manager
-from tqdm import tqdm
 
 import datasets
+import mwparserfromhell
 
 
 logger = datasets.logging.get_logger(__name__)
@@ -904,8 +905,8 @@ class WikipediaConfig(datasets.BuilderConfig):
         self.language = language
 
 
-_DATE = "20220301"
-
+_DATE = "20240101"
+_COMPLETE = 1
 
 class Wikipedia(datasets.GeneratorBasedBuilder):
     """Wikipedia dataset."""
@@ -967,9 +968,8 @@ class Wikipedia(datasets.GeneratorBasedBuilder):
 
         # Use dictionary since testing mock always returns the same result.
 
-        print("Dowloading Wikipedia dump")
+        # Download Wikipedia files
         downloaded_files = dl_manager.download({"xml": xml_urls})
-        print("Finished downloading Wikipedia dump")
 
         return [
             datasets.SplitGenerator(  # pylint:disable=g-complex-comprehension
@@ -983,7 +983,6 @@ class Wikipedia(datasets.GeneratorBasedBuilder):
         def _extract_content(filepath):
             """Extracts article content from a single WikiMedia XML file."""
             logger.info("generating examples from = %s", filepath)
-            content = []
             f = bz2.BZ2File(filename=filepath)
             # Workaround due to: https://github.com/tensorflow/tensorflow/issues/33563
             utf_f = codecs.getreader("utf-8")(f)
@@ -991,6 +990,7 @@ class Wikipedia(datasets.GeneratorBasedBuilder):
             for unused_event, elem in context:
                 if not elem.tag.endswith("page"):
                     continue
+
                 namespace = elem.tag[:-4]
                 title = elem.find(f"./{namespace}title").text
                 ns = elem.find(f"./{namespace}ns").text
@@ -1009,8 +1009,7 @@ class Wikipedia(datasets.GeneratorBasedBuilder):
                 if raw_content is None or red_ is not None:
                     continue
 
-                content.append((id_, title, raw_content))
-            return content
+                yield (id_, title, raw_content)
 
         def _clean_content(inputs, language):
             """Cleans raw wikicode to extract text."""
@@ -1028,28 +1027,55 @@ class Wikipedia(datasets.GeneratorBasedBuilder):
 
             return id_, {"id": id_, "url": url, "title": title, "text": text}
 
-        print("Parsing and cleaning Wikipedia examples")
-        with Manager() as manager:
-            examples = manager.list()
-            processes = []
-            for filepath in filepaths:
-                def parse_and_clean(examples):
-                    content = _extract_content(filepath)
-                    for obj in tqdm(content):
-                        examples.append(_clean_content(obj, language=language))
-                p = Process(target=parse_and_clean, args=(examples,))
-                p.start()
-                processes.append(p)
-
-            for p in processes:
-                p.join()
-
-            print("Parsed and cleaned Wikipedia examples")
-
-            for example in examples:
-                if example is not None:
-                    yield example
-
+        # Create queues, limit size of output queue
+        inputs, outputs = Queue(), Queue(30000)
+
+        def execute(inputs, outputs):
+            try:
+                # Process until inputs queue is exhausted
+                while not inputs.empty():
+                    batch, filepath = [], inputs.get()
+                    for obj in _extract_content(filepath):
+                        batch.append(_clean_content(obj, language=language))
+                        if len(batch) == 1024:
+                            outputs.put(batch)
+                            batch = []
+
+                    if batch:
+                        outputs.put(batch)
+
+            finally:
+                # Write message that process is complete
+                outputs.put(_COMPLETE)
+
+        for filepath in filepaths:
+            inputs.put(filepath)
+
+        # Start worker processes
+        processes = []
+        for _ in range(min(len(filepaths), os.cpu_count())):
+            process = Process(target=execute, args=(inputs, outputs))
+            process.start()
+            processes.append(process)
+
+        # Read output from worker processes
+        empty, complete = False, 0
+        while not empty:
+            # Get next result
+            result = outputs.get()
+
+            # Mark process as complete if all workers are complete and output queue is empty
+            if result == _COMPLETE:
+                complete += 1
+                empty = len(processes) == complete and outputs.empty()
+
+            elif result:
+                for r in result:
+                    if r is not None:
+                        yield r
+
+        for process in processes:
+            process.join()
 
 def _parse_and_clean_wikicode(raw_content, parser, language):
     """Strips formatting and unwanted sections from raw page content."""
 