# malayalam_wiki/malayalam_wiki.py
# Updated for streaming dataset (commit 77bcbc4, rajeshradhakrishnan)
import os
import re
import datasets
logger = datasets.logging.get_logger(__name__)
# Short description shown on the dataset hub page.
_DESCRIPTION = """\
Common Crawl - Malayalam.
"""
# BibTeX citation for the dataset (author/journal unknown upstream).
_CITATION = """\
@article{qburst,
title={Common Crawl - Malayalam},
author={n.d},
year={2020},
journal={n.d},
}
"""
# Base URL for the data shards, plus the checksum manifest that lists
# the shard filenames (one per line; see _split_generators below).
_URLs = {
"malayalam_wiki_2020": "https://huggingface.co/datasets/rajeshradhakrishnan/malayalam_2020_wiki/resolve/main/",
"checksum_url": "https://huggingface.co/datasets/rajeshradhakrishnan/malayalam_2020_wiki/resolve/main/ml_sha256.txt"
}
class MalayalamWikiConfig(datasets.BuilderConfig):
    """BuilderConfig for the MalayalamWiki dataset.

    A thin pass-through: every keyword argument (``name``, ``version``,
    ``description``, ...) is forwarded unchanged to
    :class:`datasets.BuilderConfig`.
    """

    def __init__(self, **kwargs):
        """Forward all keyword arguments to the parent ``BuilderConfig``."""
        super().__init__(**kwargs)
class MalayalamWiki(datasets.GeneratorBasedBuilder):
    """Common Crawl - Malayalam: cleaned Malayalam web text, one example per line.

    (The previous docstring called this a "news topic classification"
    dataset; it is plain text with a single ``text`` feature.)
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        MalayalamWikiConfig(
            name="malayalam_wiki_2020", version=VERSION, description="Common Crawl - Malayalam."
        ),
    ]

    # Compiled once at class-definition time instead of on every call of
    # remove_special_characters (which runs once per corpus line).
    # Raw strings keep the pattern bytes identical to the original while
    # avoiding invalid-escape-sequence warnings in newer Pythons.
    # NOTE: the literal characters "Utrnle" in the class look like leftover
    # noise; they are redundant with the Latin-letter strip below, so
    # removing them would not change output — kept for fidelity.
    _CHARS_TO_IGNORE = re.compile(r'[\,\?\.\!\-\;\:\"\“\%\‘\”\�Utrnle\_]')
    # Zero-width joiner/non-joiner and left-to-right mark.
    _UNICODE_IGNORE = re.compile(r'[\u200e\u200c\u200d]')
    _ENGLISH_IGNORE = re.compile(r'[a-zA-Z]')

    def remove_special_characters(self, txt):
        """Strip punctuation, zero-width Unicode marks and Latin letters.

        Args:
            txt: raw text line.

        Returns:
            Cleaned text. A trailing space is appended after each of the
            last two substitutions (so the result ends with two spaces);
            the caller strips the final result.
        """
        txt = txt.strip()
        txt = self._CHARS_TO_IGNORE.sub('', txt)
        txt = self._UNICODE_IGNORE.sub('', txt) + " "
        txt = self._ENGLISH_IGNORE.sub('', txt) + " "
        return txt

    def _info(self):
        """Return DatasetInfo: a single string feature named ``text``."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string")
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/qburst/common-crawl-malayalam",
            citation=_CITATION
        )

    def _split_generators(self, dl_manager):
        """Download the checksum manifest, then the data shard(s) it lists."""
        checksum_url = _URLs["checksum_url"]
        checksum_file = dl_manager.download(checksum_url)
        with open(checksum_file, encoding="utf-8") as f:
            # NOTE(review): ``if line`` is always true here (lines carry
            # their newline); empty lines are not actually filtered out.
            data_filenames = [line.strip() for line in f if line]
        # NOTE(review): only the second manifest entry ([1:2]) is downloaded
        # — presumably a deliberate single-shard limit for the streaming
        # update; confirm before widening the slice.
        data_urls = [_URLs["malayalam_wiki_2020"] + data_filename for data_filename in data_filenames[1:2]]
        downloaded_files = dl_manager.download(data_urls)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": downloaded_files}),
        ]

    def _generate_examples(self, filepaths):
        """Yield ``(key, {"text": cleaned_line})`` for every line of every file."""
        for file_id, filepath in enumerate(filepaths):
            logger.info("generating examples from = %s", filepath)
            with open(filepath, encoding="utf-8") as f:
                for row_id, row in enumerate(f):
                    yield f"{file_id}_{row_id}", {"text": self.remove_special_characters(row).strip()}