rajeshradhakrishnan committed on
Commit 77bcbc4
1 Parent(s): a29e26b

Updated for streaming dataset

Files changed (1)
malayalam_wiki.py +20 -43
malayalam_wiki.py CHANGED
@@ -1,10 +1,10 @@
-import random
 import os
 import re
 
 import datasets
 
-from datasets.tasks import TextClassification
+
+logger = datasets.logging.get_logger(__name__)
 
 _DESCRIPTION = """\
 Common Crawl - Malayalam.
@@ -20,8 +20,9 @@ _CITATION = """\
 """
 
 _URLs = {
-    "malayalam_wiki_1": "https://calicut.qburst.in/commoncrawl/malayalam/2020-10/malayalam_filtered_html_body.tar.gz",
-    "malayalam_wiki_2": "https://calicut.qburst.in/commoncrawl/malayalam/2020-10/unfiltered_heading_and_para.tar.gz"
+    "malayalam_wiki_2020": "https://huggingface.co/datasets/rajeshradhakrishnan/malayalam_2020_wiki/resolve/main/",
+    "checksum_url": "https://huggingface.co/datasets/rajeshradhakrishnan/malayalam_2020_wiki/resolve/main/ml_sha256.txt"
+
 }
 
 
@@ -43,15 +44,11 @@ class MalayalamWiki(datasets.GeneratorBasedBuilder):
 
     BUILDER_CONFIGS = [
         MalayalamWikiConfig(
-            name="malayalam_wiki", version=VERSION, description="Common Crawl - Malayalam."
+            name="malayalam_wiki_2020", version=VERSION, description="Common Crawl - Malayalam."
         ),
     ]
 
 
-    def partition(self, list_in, n):
-        random.shuffle(list_in)
-        return [list_in[i::n] for i in range(n)]
-
     def remove_special_characters(self, txt):
         chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\%\‘\”\�Utrnle\_]'
         unicode_ignore_regex = r'[\u200e\u200c\u200d]'
@@ -78,40 +75,20 @@ class MalayalamWiki(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
-        # TODO: implement iter_archive() instead of download_and_extract
-        dl_path = dl_manager.download_and_extract(_URLs)
-        files = sorted(os.listdir(os.path.join(dl_path["malayalam_wiki_1"], "malayalam_filtered_html_body")))
-        file_paths = [os.path.join(dl_path["malayalam_wiki_1"], "malayalam_filtered_html_body", file) for file in files]
-        files = sorted(os.listdir(os.path.join(dl_path["malayalam_wiki_2"], "unfiltered_heading_and_para")))
-        wiki_2 = [os.path.join(dl_path["malayalam_wiki_2"], "unfiltered_heading_and_para", file) for file in files]
-        file_paths.extend(wiki_2)
-        filepaths_splice = self.partition(file_paths, 3)
+        checksum_url = _URLs["checksum_url"]
+        checksum_file = dl_manager.download(checksum_url)
+        with open(checksum_file, encoding="utf-8") as f:
+            data_filenames = [line.strip() for line in f if line]
+        data_urls = [_URLs["malayalam_wiki_2020"] + data_filename for data_filename in data_filenames[1:2]]
+        downloaded_files = dl_manager.download(data_urls)
         return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "filepath": filepaths_splice[0],
-                    "split": "train",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={
-                    "filepath": filepaths_splice[1],
-                    "split": "validation",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={
-                    "filepath": filepaths_splice[2],
-                    "split": "test",
-                },
-            )
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": downloaded_files}),
         ]
 
-    def _generate_examples(self, filepath):
-        for file_id, file in enumerate(filepath):
-            with open(file, encoding="utf-8") as f:
-                for row_id, row in enumerate(f):
-                    yield f"{file_id}_{row_id}", {"text": self.remove_special_characters(row).strip()}
+    def _generate_examples(self, filepaths):
+        """This function returns the examples in the raw (text) form by iterating on all the files."""
+        for file_id, filepath in enumerate(filepaths):
+            logger.info("generating examples from = %s", filepath)
+            with open(filepath, encoding="utf-8") as f:
+                for row_id, row in enumerate(f):
+                    yield f"{file_id}_{row_id}", {"text": self.remove_special_characters(row).strip()}
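The new _split_generators drives downloads from a remote manifest instead of extracting tarballs. Below is a hypothetical stand-alone sketch of that flow, outside the datasets builder, assuming each line of ml_sha256.txt names one remote text shard; note that the committed code slices [1:2], so only the second manifest entry is fetched.

import urllib.request

# Hypothetical stand-alone reproduction of the commit's download flow.
# Assumption: each line of ml_sha256.txt names one remote text shard.
_BASE = "https://huggingface.co/datasets/rajeshradhakrishnan/malayalam_2020_wiki/resolve/main/"

with urllib.request.urlopen(_BASE + "ml_sha256.txt") as resp:
    data_filenames = [line.decode("utf-8").strip() for line in resp if line.strip()]

# Mirrors the script: the [1:2] slice keeps only the second manifest entry.
data_urls = [_BASE + name for name in data_filenames[1:2]]
print(data_urls)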
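The cleaning step is unchanged by this commit: it strips stray punctuation, a handful of Latin characters, and zero-width marks before each row is yielded. A small sketch of that behavior follows; the full method body lies outside the diff context, so how the two patterns are applied is an assumption.

import re

# The two patterns from the loading script: stray punctuation plus a few
# Latin characters, and the zero-width marks (\u200e, \u200c, \u200d)
# common in Malayalam web text.
chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\%\‘\”\�Utrnle\_]'
unicode_ignore_regex = r'[\u200e\u200c\u200d]'

def remove_special_characters(txt):
    # Plausible application of the two patterns (an assumption; the full
    # method body is not shown in the diff).
    txt = re.sub(chars_to_ignore_regex, '', txt)
    return re.sub(unicode_ignore_regex, '', txt)

print(remove_special_characters('\u200cമലയാളം, വിക്കിപീഡിയ!'))  # -> മലയാളം വിക്കിപീഡിയ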
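With the builder yielding plain text rows from files downloaded at request time, the dataset can be consumed lazily, which is the point of the commit message. A minimal usage sketch, assuming the script is published under the committer's namespace as rajeshradhakrishnan/malayalam_wiki (the repository id is an assumption, not confirmed by the commit):

from datasets import load_dataset

# streaming=True iterates examples straight from the remote text files
# instead of materializing the whole corpus locally.
ds = load_dataset(
    "rajeshradhakrishnan/malayalam_wiki",  # assumed repository id
    "malayalam_wiki_2020",                 # the single builder config
    streaming=True,
    split="train",
)

for example in ds:
    print(example["text"])  # one cleaned line of Malayalam text
    break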