"""Hugging Face `datasets` loading script for the binhvq news corpus (2021 dedup)."""
import logging
import os

import datasets

_DESC = """
BinhVQ news corpus version 2021 (~20M records)
https://github.com/binhvq/news-corpus

Preprocessed:
- Read mongo dump and export to jsonl
- Clean content with Beautifulsoup
- Concatenate title, sapo and content
- Remove exact match sha256
- Shuffle and split train / val (0.01)

**IMPORTANT**: Please run `pip install lm_dataformat` before load this dataset
"""

_REPO_URL = "https://huggingface.co/datasets/imthanhlv/binhvq_dedup/tree/main/"
# Relative file names resolved by the datasets download manager.
# NOTE(review): only the validation shard is listed; the TRAIN split below
# therefore reads the same file — confirm whether a train shard should be added.
_URLS = {
    "val": "val.jsonl.zst",
}

# lm_dataformat is an optional third-party dependency; fail early with a clear
# message instead of an ImportError deep inside example generation.
# (The previous unconditional top-level import made this guard unreachable.)
try:
    import lm_dataformat
except ImportError:
    print(
        "Can't import lm_dataformat, please run pip install lm_dataformat and try again"
    )
    exit()


class BinhvqConfig(datasets.BuilderConfig):
    """BuilderConfig for the binhvq corpus (no extra options beyond the base class)."""

    def __init__(self, **kwargs):
        super(BinhvqConfig, self).__init__(**kwargs)


class Binhvq(datasets.GeneratorBasedBuilder):
    """Dataset builder yielding plain-text records from lm_dataformat archives."""

    BUILDER_CONFIGS = [
        BinhvqConfig(
            name="plain_text",
            version=datasets.Version("1.0.0", ""),
            description="Plain text",
        ),
    ]

    def _info(self):
        """Return dataset metadata: a single string feature named ``text``."""
        return datasets.DatasetInfo(
            description=_DESC,
            features=datasets.Features({"text": datasets.Value("string")}),
            homepage="https://github.com/binhvq/news-corpus",
        )

    def _split_generators(self, dl_manager):
        """Download the shards and declare the train / validation splits.

        Both splits currently point at the ``val`` file because ``_URLS``
        only lists one shard — see the NOTE on ``_URLS``.
        """
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["val"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["val"]},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs from an lm_dataformat archive.

        GeneratorBasedBuilder requires each yielded item to be a unique key
        plus the example dict; the previous version yielded bare dicts,
        which breaks dataset generation.
        """
        logging.info(f"Generating examples from {filepath}")
        reader = lm_dataformat.Reader(filepath)
        for idx, doc in enumerate(reader.stream_data()):
            yield idx, {"text": doc}