persian_daily_news / tasnim_daily.py
import csv
import os
import sys

import datasets
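
# Some news articles are longer than the default CSV field size limit,
# so raise the limit before parsing.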
csv.field_size_limit(sys.maxsize)

_DESCRIPTION = """
persian_news_dataset is a collection of 5 million news articles.
The news articles have been gathered from more than 10 news agencies over the last 12 years.
The dataset is provided by Rohan AI lab for research purposes.
For more information refer to this link: https://saied71.github.io/RohanAiLab/
"""
_PROJECT_URL = """https://saied71.github.io/RohanAiLab/"""
_CITATION = """
@misc{persian_news_dataset,
  url={https://saied71.github.io/RohanAiLab/},
  author={Saied Alimoradi},
  year={2021}
}
"""
_URL = "persian_news_dataset.zip"
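
# Layout of the CSV inside the archive (inferred from _generate_examples below):
# column 0 = article text, column 1 = category, column 2 = title; the first
# row is a header and is skipped during generation.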

class Persian_news(datasets.GeneratorBasedBuilder):
    """Builder for the Persian news dataset: each example has text, title and category."""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "category": datasets.Value("string"),
                }
            ),
            homepage=_PROJECT_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Download (or locate) and extract the archive, then point the single
        # TRAIN split at the CSV file inside it.
        dl_dir = dl_manager.download_and_extract(_URL)
        data_dir = os.path.join(dl_dir, "persian_news_dataset.csv")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": data_dir},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yields examples."""
        with open(filepath, encoding="utf-8") as f:
            reader = csv.reader(f)
            for id_, row in enumerate(reader):
                # Skip the CSV header row.
                if id_ == 0:
                    continue
                yield id_, {
                    "text": row[0],
                    "title": row[2],
                    "category": row[1],
                }
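

# A minimal usage sketch, assuming the "persian_news_dataset.zip" archive sits
# next to this script and a version of the `datasets` library that still
# accepts local loading scripts (newer releases may require
# trust_remote_code=True).
if __name__ == "__main__":
    dataset = datasets.load_dataset("tasnim_daily.py", split="train")
    print(dataset[0]["title"])
    print(dataset[0]["category"])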