"""
A randomly sampled 500K portion of the original LSCP dataset (Enhanced Large
Scale Colloquial Persian Language Understanding), provided by Hezar AI.
"""

import csv
from ast import literal_eval

import datasets

logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{abdi-khojasteh-etal-2020-lscp,
    title = "{LSCP}: Enhanced Large Scale Colloquial {P}ersian Language Understanding",
    author = "Abdi Khojasteh, Hadi and
      Ansari, Ebrahim and
      Bohlouli, Mahdi",
    booktitle = "Proceedings of the Twelfth Language Resources and Evaluation Conference",
    month = may,
    year = "2020",
    address = "Marseille, France",
    publisher = "European Language Resources Association",
    url = "https://aclanthology.org/2020.lrec-1.776",
    pages = "6323--6327",
    abstract = "Language recognition has been significantly advanced in recent years by means of modern machine learning methods such as deep learning and benchmarks with rich annotations. However, research is still limited in low-resource formal languages. This consists of a significant gap in describing the colloquial language especially for low-resourced ones such as Persian. In order to target this gap for low resource languages, we propose a {``}Large Scale Colloquial Persian Dataset{''} (LSCP). LSCP is hierarchically organized in a semantic taxonomy that focuses on multi-task informal Persian language understanding as a comprehensive problem. This encompasses the recognition of multiple semantic aspects in the human-level sentences, which naturally captures from the real-world sentences. We believe that further investigations and processing, as well as the application of novel algorithms and methods, can strengthen enriching computerized understanding and processing of low resource languages. The proposed corpus consists of 120M sentences resulted from 27M tweets annotated with parsing tree, part-of-speech tags, sentiment polarity and translation in five different languages.",
    language = "English",
    ISBN = "979-10-95546-34-4",
}
"""

_DESCRIPTION = """\
Language recognition has been significantly advanced in recent years by means of modern machine learning methods such as deep learning
and benchmarks with rich annotations. However, research is still limited for low-resource languages. This leaves a significant gap
in describing colloquial language, especially for low-resourced ones such as Persian. To target this gap,
we propose the “Large Scale Colloquial Persian Dataset” (LSCP). LSCP is hierarchically organized in a semantic taxonomy that focuses on
multi-task informal Persian language understanding as a comprehensive problem. This encompasses the recognition of multiple semantic aspects of
human-level sentences, which are naturally captured from real-world sentences. We believe that further investigation and processing, as well as the application of novel algorithms and methods,
can strengthen and enrich the computerized understanding and processing of low-resource languages. The proposed corpus consists of 120M sentences derived from 27M tweets,
annotated with parse trees, part-of-speech tags, sentiment polarity, and translations into five different languages.
"""

_DOWNLOAD_URLS = {
    "train": "https://huggingface.co/datasets/hezarai/lscp-500k/resolve/main/lscp-500k_train.csv",
    "test": "https://huggingface.co/datasets/hezarai/lscp-500k/resolve/main/lscp-500k_test.csv",
}


class LSCP500KConfig(datasets.BuilderConfig):
    """BuilderConfig for LSCP-500K."""

    def __init__(self, **kwargs):
        """BuilderConfig for LSCP-500K.

        Args:
            **kwargs: Keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class LSCP500K(datasets.GeneratorBasedBuilder):
    """LSCP500K dataset."""

    BUILDER_CONFIGS = [
        LSCP500KConfig(
            name="lscp-500k",
            version=datasets.Version("1.0.0"),
            description="LSCP500K dataset",
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "pos_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "P",
                                "Ne",
                                "PRO",
                                "CONJ",
                                "N",
                                "PUNC",
                                "Pe",
                                "ADV",
                                "V",
                                "AJ",
                                "AJe",
                                "DET",
                                "POSTP",
                                "NUM",
                                "DETe",
                                "NUMe",
                                "PROe",
                                "ADVe",
                                "RES",
                                "CL",
                                "INT",
                                "CONJe",
                                "RESe",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://huggingface.co/datasets/hezarai/lscp-500k",
            citation=_CITATION,
        )
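
    # For reference, a generated record pairs each token with a POS tag drawn
    # from the `ClassLabel` names above; a hypothetical example (not taken
    # from the actual files) would look like:
    #   {"tokens": ["سلام", "دنیا"], "pos_tags": ["INT", "N"]}
    # During generation, `datasets` encodes each tag string into its integer
    # class index automatically.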

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        train_path = dl_manager.download_and_extract(_DOWNLOAD_URLS["train"])
        test_path = dl_manager.download_and_extract(_DOWNLOAD_URLS["test"])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}
            ),
        ]

    def _generate_examples(self, filepath):
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as csv_file:
            csv_reader = csv.reader(csv_file, quotechar='"', skipinitialspace=True)
            for id_, row in enumerate(csv_reader):
                # Skip the header row.
                if id_ < 1:
                    continue
                # Each row stores the token and tag lists as Python-literal
                # strings, so they are parsed back into lists here.
                tokens, pos_tags = row
                tokens = literal_eval(tokens)
                pos_tags = literal_eval(pos_tags)
                yield id_, {"tokens": tokens, "pos_tags": pos_tags}
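

# A minimal local smoke test (a sketch, not part of the published loader):
# running this file directly builds the dataset from this script. On recent
# `datasets` releases, script-based loading may also require
# `trust_remote_code=True`.
if __name__ == "__main__":
    dataset = datasets.load_dataset(__file__)
    print(dataset["train"][0])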