# saw_corpus.py: Hugging Face `datasets` loading script for the SAW Corpus.
import json
import datasets
logger = datasets.logging.get_logger(__name__)
_DESCRIPTION = """\
The Selective Armenian Web (SAW) Corpus is a curated collection of Armenian-language texts gathered from a range of online platforms. It is designed to support natural language processing for Armenian, covering news articles, legal documents, and other web content.
"""
_CITATION = """\
@dataset{saw_corpus_2024,
  title  = {Selective Armenian Web (SAW) Corpus},
  author = {Mkrtich Minasyan},
  year   = {2024}
}
"""
_LICENSE = "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)"
class SAWCorpus(datasets.GeneratorBasedBuilder):
    """Builder for the Selective Armenian Web (SAW) Corpus."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "text": datasets.Value("string"),
                "url": datasets.Value("string"),
                "date": datasets.Value("string"),
                "tags": datasets.Sequence(datasets.Value("string")),
                "source": datasets.Value("string"),
            }),
            supervised_keys=None,
            homepage="https://huggingface.co/datasets/MMinasyan/SAW-corpus",
            citation=_CITATION,
            license=_LICENSE,
        )
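
    # Hypothetical example of one JSONL record matching the schema above
    # (illustrative values only, not drawn from the actual corpus):
    #   {"text": "...", "url": "https://example.am/article",
    #    "date": "2024-01-15", "tags": ["news"], "source": "example.am"}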
    def _split_generators(self, dl_manager):
        # The data files are assumed to sit at the root of the dataset
        # repository; the `resolve/main/` segment is required for direct
        # file downloads from the Hugging Face Hub.
        base = "https://huggingface.co/datasets/MMinasyan/SAW-corpus/resolve/main"
        urls = {
            "train": f"{base}/train.jsonl",
            "validation": f"{base}/val.jsonl",
            "test": f"{base}/test.jsonl",
        }
        downloaded_files = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["validation"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]
    def _generate_examples(self, filepath):
        logger.info("Generating examples from = %s", filepath)
        # Each line of the JSONL file holds one JSON object; the line index
        # doubles as the example id.
        with open(filepath, encoding="utf-8") as f:
            for id_, line in enumerate(f):
                data = json.loads(line)
                yield id_, {
                    "text": data["text"],
                    "url": data["url"],
                    "date": data["date"],
                    "tags": data["tags"],
                    "source": data["source"],
                }
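
# Minimal usage sketch (an illustration, not part of the original script):
# once this script lives in the `MMinasyan/SAW-corpus` dataset repository,
# the corpus can be loaded through `datasets.load_dataset`. Guarded so that
# importing this module has no side effects.
if __name__ == "__main__":
    from datasets import load_dataset

    # Recent versions of `datasets` require `trust_remote_code=True` to run
    # repository loading scripts such as this one.
    saw = load_dataset("MMinasyan/SAW-corpus", trust_remote_code=True)
    print(saw)
    print(saw["train"][0]["text"][:200])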