import json
from typing import Any, Generator

import datasets

_CITATION = ""
_DESCRIPTION = "A dataset of Japanese Wikinews articles manually annotated with named entity labels."
_HOMEPAGE = "https://ja.wikinews.org/wiki/%E3%83%A1%E3%82%A4%E3%83%B3%E3%83%9A%E3%83%BC%E3%82%B8"
_LICENSE = "This work is licensed under CC BY 2.5"
_URL = "https://huggingface.co/datasets/llm-book/ner-wikinews-dataset/raw/main/annotated_wikinews.json"


class NerWikinewsDataset(datasets.GeneratorBasedBuilder):
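    """Builder for a dataset of Wikinews articles annotated with named entities.

    The file referenced by ``_URL`` is assumed to be a JSON export in a
    Label Studio-style format; see ``_convert_data_format`` for the shape
    it is expected to have.
    """
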
    def _info(self):
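        """Declare the dataset metadata and the features of each example."""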
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "curid": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "entities": [
                        {
                            "name": datasets.Value("string"),
                            "span": datasets.Sequence(
                                datasets.Value("int64"), length=2
                            ),
                            "type": datasets.Value("string"),
                        }
                    ],
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _convert_data_format(
        self, annotated_data: list[dict[str, Any]]
    ) -> list[dict[str, Any]]:
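        """Flatten the raw annotation export into the features declared in ``_info``.

        Each input record is assumed to look roughly like this sketch (keys
        are taken from the accesses below):

            {
                "id": ...,
                "data": {"text": ...},
                "annotations": [
                    {"result": [{"value": {"text": ..., "start": ...,
                                           "end": ..., "labels": [...]}}]}
                ],
            }

        Articles without annotations are dropped; collected entities are
        sorted by the start offset of their spans.
        """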
        outputs = []
        for data in annotated_data:
            if data["annotations"] == []:
                continue
            entities = []
            for annotation in data["annotations"]:
                for result in annotation["result"]:
                    entities.append(
                        {
                            "name": result["value"]["text"],
                            "span": [
                                result["value"]["start"],
                                result["value"]["end"],
                            ],
                            "type": result["value"]["labels"][0],
                        }
                    )
            # Sort entities by the start offset of their spans
            # (sorting an empty list is a harmless no-op).
            entities.sort(key=lambda x: x["span"][0])
            outputs.append(
                {
                    "curid": data["id"],
                    "text": data["data"]["text"],
                    "entities": entities,
                }
            )
        return outputs

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> list[datasets.SplitGenerator]:
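        """Download the annotated JSON and expose it as a single test split."""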
        data_file = dl_manager.download_and_extract(_URL)
        with open(data_file, "r", encoding="utf-8") as f:
            data = json.load(f)
        data = self._convert_data_format(data)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data": data},
            ),
        ]

    def _generate_examples(self, data: list[dict[str, Any]]) -> Generator:
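        """Yield (key, example) pairs matching the features declared in ``_info``."""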
        for key, d in enumerate(data):
            yield key, {
                "curid": d["curid"],
                "text": d["text"],
                "entities": d["entities"],
            }
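

# Usage sketch (assumption: this file is the loading script of the
# llm-book/ner-wikinews-dataset repository on the Hugging Face Hub; adjust
# the path if you host the script elsewhere):
#
#     from datasets import load_dataset
#
#     dataset = load_dataset("llm-book/ner-wikinews-dataset", split="test")
#     print(dataset[0]["curid"], dataset[0]["entities"])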