"""ArmanPersoNERCorpus"""

import datasets

_CITATION = """\
@inproceedings{poostchi-etal-2016-personer,
    title = "{P}erso{NER}: {P}ersian Named-Entity Recognition",
    author = "Poostchi, Hanieh  and
      Zare Borzeshi, Ehsan  and
      Abdous, Mohammad  and
      Piccardi, Massimo",
    booktitle = "Proceedings of {COLING} 2016, the 26th International Conference on Computational Linguistics: Technical Papers",
    month = dec,
    year = "2016",
    address = "Osaka, Japan",
    publisher = "The COLING 2016 Organizing Committee",
    url = "https://aclanthology.org/C16-1319",
    pages = "3381--3389",
    abstract = "Named-Entity Recognition (NER) is still a challenging task for languages with low digital resources. The main difficulties arise from the scarcity of annotated corpora and the consequent problematic training of an effective NER pipeline. To abridge this gap, in this paper we target the Persian language that is spoken by a population of over a hundred million people world-wide. We first present and provide ArmanPersoNERCorpus, the first manually-annotated Persian NER corpus. Then, we introduce PersoNER, an NER pipeline for Persian that leverages a word embedding and a sequential max-margin classifier. The experimental results show that the proposed approach is capable of achieving interesting MUC7 and CoNLL scores while outperforming two alternatives based on a CRF and a recurrent neural network.",
}
"""

_DESCRIPTION = """\
ArmanPersoNERCorpus includes 250,015 tokens and 7,682 Persian sentences in total. The NER tags are in IOB format.
"""
_HOMEPAGE = "https://github.com/HaniehP/PersianNER"
_DATA_URL = "https://github.com/HaniehP/PersianNER/raw/master/ArmanPersoNERCorpus.zip"
_TRAINING_FILE = r'arman/data/train.txt'
_DEV_FILE = r'arman/data/dev.txt'
_TEST_FILE = r'arman/data/test.txt'
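
# Each line of the data files is expected to hold one sentence with three
# semicolon-separated fields (a format inferred from the parsing in
# _generate_examples below):
#   <sentence id>;<space-separated tokens>;<space-separated IOB tags>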

class Arman(datasets.GeneratorBasedBuilder):
    """ArmanPersoNER Corpus"""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name='splitted words',
            version=datasets.Version('1.0.0'),
            description='the split-words version of ArmanPersoNERCorpus'
        )
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    'id': datasets.Value('string'),
                    'tokens': datasets.Sequence(datasets.Value('string')),
                    'ner_tags': datasets.Sequence(datasets.ClassLabel(
                        num_classes=13,
                        names=[
                            'B-event',
                            'B-fac',
                            'B-loc',
                            'B-org',
                            'B-pers',
                            'B-pro',
                            'I-event',
                            'I-fac',
                            'I-loc',
                            'I-org',
                            'I-pers',
                            'I-pro',
                            'O'
                        ]
                    ))
                }
            ),
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # The corpus is read from pre-split local files, so dl_manager (and the
        # _DATA_URL defined above) is not used here.
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'filepath': _TRAINING_FILE}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={'filepath': _DEV_FILE}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={'filepath': _TEST_FILE}),
        ]

    def _generate_examples(self, filepath):
        qid = 0
        with open(filepath, encoding='utf-8') as f:
            for line in f:
                line = line.strip()
                if not line:
                    # Skip blank lines.
                    continue
                # Split the line into its three semicolon-separated fields:
                # the sentence id, the tokens, and the IOB tags.
                parts = line.split(';')
                yield qid, {
                    'id': parts[0],
                    'tokens': parts[1].split(),
                    'ner_tags': parts[2].split(),
                }
                qid += 1