Languages: Persian
File size: 4,754 Bytes

import os

import datasets

_CITATION = """\
@article{shahshahani2018peyma,
    title={PEYMA: A Tagged Corpus for Persian Named Entities},
    author={Mahsa Sadat Shahshahani and Mahdi Mohseni and Azadeh Shakery and Heshaam Faili},
    year=2018,
    journal={ArXiv},
    volume={abs/1801.09936}
}
"""
_DESCRIPTION = """The PEYMA dataset includes 7,145 sentences with a total of 302,530 tokens, of which 41,148 are tagged with seven different classes."""

_DATA_PATH = {
    'train': os.path.join('data', 'train.txt'),
    'test': os.path.join('data', 'test.txt'),
    'val': os.path.join('data', 'dev.txt'),
}
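
# The loader below assumes each data file stores one "token|tag" pair per
# line, with a blank line marking the end of a sentence. An illustrative
# (hypothetical, not taken from the corpus) two-token sentence:
#
#   تهران|B_LOC
#   است|O
#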

class PEYMAConfig(datasets.BuilderConfig):
    """BuilderConfig for PEYMA."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)


class PEYMA(datasets.GeneratorBasedBuilder):
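    """Persian named-entity recognition dataset (PEYMA)."""
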
    BUILDER_CONFIGS = [
        PEYMAConfig(name="PEYMA", version=datasets.Version("1.0.0"), description="Persian NER dataset"),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    # IOB-style labels with an underscore separator; the seven
                    # entity classes are date (DAT), location (LOC), money
                    # (MON), organization (ORG), percent (PCT), person (PER),
                    # and time (TIM).
                    "tags": datasets.Sequence(
                        datasets.ClassLabel(
                            names=[
                                "O",
                                "B_DAT",
                                "B_LOC",
                                "B_MON",
                                "B_ORG",
                                "B_PCT",
                                "B_PER",
                                "B_TIM",
                                "I_DAT",
                                "I_LOC",
                                "I_MON",
                                "I_ORG",
                                "I_PCT",
                                "I_PER",
                                "I_TIM",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=('tokens', 'tags'),
            # Homepage of the dataset for documentation
            homepage="https://hooshvare.github.io/docs/datasets/ner#peyma",
            citation=_CITATION,
        )
        
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for the train, test, and validation splits."""
        # dl_manager is unused: the data files are resolved relative to the
        # dataset repository via _DATA_PATH.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs are passed to _generate_examples.
                gen_kwargs={"filepath": _DATA_PATH["train"], "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": _DATA_PATH["test"], "split": "test"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": _DATA_PATH["val"], "split": "validation"},
            ),
        ]

    def _generate_examples(self, filepath, split):
        with open(filepath, "r", encoding="utf-8") as f:
            id_ = 0
            tokens = []
            ner_labels = []
            for line in f:
                stripped_line = line.strip(" \n")  # strip spaces AND newline characters
                if len(stripped_line) == 0:
                    # An empty line marks the end of a sentence, so yield the
                    # accumulated tokens and labels. Skip the yield when both
                    # lists are empty, which happens when several empty lines
                    # are contiguous.
                    if tokens and ner_labels:
                        yield id_, {
                            "tokens": tokens,
                            "tags": ner_labels,
                        }
                        # Increment the id and reset the accumulators.
                        id_ += 1
                        tokens = []
                        ner_labels = []
                else:
                    try:
                        # Each non-empty line holds a "token|tag" pair. Split
                        # the stripped line so the tag does not keep a
                        # trailing newline.
                        token, ner_label = stripped_line.split("|")
                        tokens.append(token)
                        ner_labels.append(ner_label)
                    except ValueError:
                        # Skip malformed lines that do not split into exactly
                        # two fields.
                        continue
            # Yield the final sentence if the file does not end with an
            # empty line.
            if tokens and ner_labels:
                yield id_, {
                    "tokens": tokens,
                    "tags": ner_labels,
                }
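
# Usage sketch (an assumption, not part of the original script): save this
# file as peyma.py next to a data/ directory holding train.txt, test.txt,
# and dev.txt, then load it with the datasets library:
#
#   from datasets import load_dataset
#
#   ds = load_dataset("path/to/peyma.py")
#   print(ds["train"][0])  # {'tokens': [...], 'tags': [...]}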