"""Collection3: Russian dataset for named entity recognition"""

import os

import datasets


logger = datasets.logging.get_logger(__name__)


_HOMEPAGE = "http://labinform.ru/pub/named_entities/index.htm"

_CITATION = """\
@inproceedings{mozharova-loukachevitch-2016-two-stage-russian-ner,
  author={Mozharova, Valerie and Loukachevitch, Natalia},
  booktitle={2016 International FRUCT Conference on Intelligence, Social Media and Web (ISMW FRUCT)},
  title={Two-stage approach in Russian named entity recognition},
  year={2016},
  pages={1-6},
  doi={10.1109/FRUCT.2016.7584769}}
"""

_DESCRIPTION = """\
Collection3 is a Russian dataset for named entity recognition annotated with LOC (location), PER (person), and ORG (organization) tags.

The dataset is based on the Persons-1000 collection, which originally contained 1000 news documents labeled only with person names.
Additional entity labels were added by Valerie Mozharova and Natalia Loukachevitch.
Conversion to the IOB2 format and splitting into train, validation and test sets were done by the DeepPavlov team.

For more details see https://ieeexplore.ieee.org/document/7584769 and http://labinform.ru/pub/named_entities/index.htm
"""

_URLS = {
    "train": os.path.join("data", "train.txt.gz"),
    "dev": os.path.join("data", "valid.txt.gz"),
    "test": os.path.join("data", "test.txt.gz"),
}
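
# The data files are expected to be in CoNLL-style IOB2 format, which is what
# `_generate_examples` below assumes: one token per line followed by a tab and
# its tag, with blank lines separating sentences. An illustrative sample (not
# copied from the released files):
#
#     Москва    B-LOC
#     слезам    O
#     не        O
#     верит     O
#     .         O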


class Collection3(datasets.GeneratorBasedBuilder):
    """Collection3 dataset."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
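        # Each generated example looks like this (an illustrative sketch):
        #     {"id": "0", "tokens": ["Москва", "."], "ner_tags": [5, 0]}
        # where the ids index into the ClassLabel names below (5 = B-LOC, 0 = O).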
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
                        )
                    ),
                }
            ),
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        data_files = dl_manager.download_and_extract(_URLS)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        logger.info("⏳ Generating examples from %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            ner_tags = []
            for line in f:
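                # A document marker or a blank line ends the current sentence.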
                if line.startswith("<DOCSTART>") or line == "" or line == "\n":
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "ner_tags": ner_tags,
                        }
                        guid += 1
                        tokens = []
                        ner_tags = []
                else:
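                    # Data lines are tab-separated: the token, then its NER tag.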
                    splits = line.split("\t")
                    tokens.append(splits[0])
                    ner_tags.append(splits[1].rstrip())
            # last example
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "ner_tags": ner_tags,
                }
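

# Minimal usage sketch (an assumption, not part of the original script): with
# script-based loading in 🤗 Datasets, this builder can be invoked by pointing
# `load_dataset` at this file (the path below is hypothetical):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("path/to/collection3.py")
#     print(ds["train"][0]["tokens"])
#     print(ds["train"].features["ner_tags"].feature.names)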