# arman/arman.py
# Provenance: Hugging Face dataset repository (author: BK-V, commit f92d782, "Update arman.py")
import os
import datasets
"""ArmanPersoNERCorpus"""
_CITATION = """\
@inproceedings{poostchi-etal-2016-personer,
title = "{P}erso{NER}: {P}ersian Named-Entity Recognition",
author = "Poostchi, Hanieh and
Zare Borzeshi, Ehsan and
Abdous, Mohammad and
Piccardi, Massimo",
booktitle = "Proceedings of {COLING} 2016, the 26th International Conference on Computational Linguistics: Technical Papers",
month = dec,
year = "2016",
address = "Osaka, Japan",
publisher = "The COLING 2016 Organizing Committee",
url = "https://aclanthology.org/C16-1319",
pages = "3381--3389",
abstract = "Named-Entity Recognition (NER) is still a challenging task for languages with low digital resources. The main difficulties arise from the scarcity of annotated corpora and the consequent problematic training of an effective NER pipeline. To abridge this gap, in this paper we target the Persian language that is spoken by a population of over a hundred million people world-wide. We first present and provide ArmanPerosNERCorpus, the first manually-annotated Persian NER corpus. Then, we introduce PersoNER, an NER pipeline for Persian that leverages a word embedding and a sequential max-margin classifier. The experimental results show that the proposed approach is capable of achieving interesting MUC7 and CoNNL scores while outperforming two alternatives based on a CRF and a recurrent neural network.",
}
"""
_DESCRIPTION = """\
ArmanPersoNERCorpus includes 250,015 tokens and 7,682 Persian sentences in total.The NER tags are in IOB format.
"""
_HOMEPAGE = "https://github.com/HaniehP/PersianNER"
_DATA_URL = "https://github.com/HaniehP/PersianNER/raw/master/ArmanPersoNERCorpus.zip"
_TRAINING_FILE = r'arman/data/train.txt'
_DEV_FILE = r'arman/data/dev.txt'
_TEST_FILE = r'arman/data/test.txt'
class Arman(datasets.GeneratorBasedBuilder):
    """ArmanPersoNER Corpus: Hugging Face builder for Persian NER (IOB tags)."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name='splitted words',
            version=datasets.Version('1.0.0', ''),
            description='this is splitted words version of ArmanPersoNERCorpus'
        )
    ]

    def _info(self):
        """Return the dataset metadata: feature schema, homepage, citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    'id': datasets.Value('string'),
                    'tokens': datasets.Sequence(datasets.Value('string')),
                    # 13 IOB labels: B-/I- over six entity types plus 'O'.
                    'ner_tags': datasets.Sequence(datasets.ClassLabel(
                        num_classes=13,
                        names=[
                            'B-event',
                            'B-fac',
                            'B-loc',
                            'B-org',
                            'B-pers',
                            'B-pro',
                            'I-event',
                            'I-fac',
                            'I-loc',
                            'I-org',
                            'I-pers',
                            'I-pro',
                            'O',
                        ]
                    ))
                }
            ),
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Declare the train/test/validation splits.

        NOTE(review): dl_manager is unused and _DATA_URL is never downloaded;
        the splits point at relative local paths — confirm the data files are
        shipped with this repository.
        """
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'filepath': _TRAINING_FILE}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={'filepath': _TEST_FILE}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={'filepath': _DEV_FILE}),
        ]

    def _generate_examples(self, filepath):
        """Yield (key, example) pairs from a semicolon-separated data file.

        Each line is expected to have three ';'-separated fields:
        ``id;space-separated tokens;space-separated IOB tags``.

        Bug fix: the original version yielded the last example a second time
        after the loop (duplicating the final record, and emitting an empty
        record for an empty file). Each input line now yields exactly once.
        """
        with open(filepath, encoding='utf-8') as f:
            for qid, line in enumerate(f):
                # Split once per line (the original split the same line three times).
                parts = line.split(';')
                yield qid, {
                    'id': parts[0],
                    'tokens': parts[1].split(),
                    'ner_tags': parts[2].strip('\n').split(),
                }