"""DocRED: A Large-Scale Document-Level Relation Extraction Dataset"""


import json
import os

import datasets


_CITATION = """\
@inproceedings{yao2019DocRED,
  title={{DocRED}: A Large-Scale Document-Level Relation Extraction Dataset},
  author={Yao, Yuan and Ye, Deming and Li, Peng and Han, Xu and Lin, Yankai and Liu, Zhenghao and Liu, \
  Zhiyuan and Huang, Lixin and Zhou, Jie and Sun, Maosong},
  booktitle={Proceedings of ACL 2019},
  year={2019}
}
"""

_DESCRIPTION = """\
Multiple entities in a document generally exhibit complex inter-sentence relations, and cannot be well handled by \
existing relation extraction (RE) methods that typically focus on extracting intra-sentence relations for single \
entity pairs. In order to accelerate the research on document-level RE, we introduce DocRED, a new dataset constructed \
from Wikipedia and Wikidata with three features:
    - DocRED annotates both named entities and relations, and is the largest human-annotated dataset for document-level RE from plain text.
    - DocRED requires reading multiple sentences in a document to extract entities and infer their relations by synthesizing all information of the document.
    - Along with the human-annotated data, we also offer large-scale distantly supervised data, which enables DocRED to be adopted for both supervised and weakly supervised scenarios.
"""

_URLS = {
    "dev": "https://drive.google.com/uc?export=download&id=1AHUm1-_V9GCtGuDcc8XrMUCJE8B-HHoL",
    "train_distant": "https://drive.google.com/uc?export=download&id=1Qr4Jct2IJ9BVI86_mCk_Pz0J32ww9dYw",
    "train_annotated": "https://drive.google.com/uc?export=download&id=1NN33RzyETbanw4Dg2sRrhckhWpzuBQS9",
    "test": "https://drive.google.com/uc?export=download&id=1lAVDcD94Sigx7gR3jTfStI66o86cflum",
    "rel_info": "https://drive.google.com/uc?id=1y9A0zKrvETc1ddUFuFhBg3Xfr7FEL4dW&export=download",
}


class DocRed(datasets.GeneratorBasedBuilder):
    """DocRED: A Large-Scale Document-Level Relation Extraction Dataset"""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "title": datasets.Value("string"),
                    "sents": datasets.features.Sequence(datasets.features.Sequence(datasets.Value("string"))),
                    "vertexSet": [
                        [
                            {
                                "name": datasets.Value("string"),
                                "sent_id": datasets.Value("int32"),
                                "pos": datasets.features.Sequence(datasets.Value("int32")),
                                "type": datasets.Value("string"),
                            }
                        ]
                    ],
                    "labels": datasets.features.Sequence(
                        {
                            "head": datasets.Value("int32"),
                            "tail": datasets.Value("int32"),
                            "relation_id": datasets.Value("string"),
                            "relation_text": datasets.Value("string"),
                            "evidence": datasets.features.Sequence(datasets.Value("int32")),
                        }
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/thunlp/DocRED",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        downloads = {}
        for key in _URLS.keys():
            downloads[key] = dl_manager.download_and_extract(_URLS[key])
            # download_and_extract may return a directory (e.g. for the dummy data
            # archives used in tests); in that case, point at the split's JSON file inside it.
            if os.path.isdir(downloads[key]):
                downloads[key] = os.path.join(downloads[key], key + ".json")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloads["dev"], "rel_info": downloads["rel_info"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": downloads["test"], "rel_info": downloads["rel_info"]}
            ),
            datasets.SplitGenerator(
                name="train_annotated",
                gen_kwargs={"filepath": downloads["train_annotated"], "rel_info": downloads["rel_info"]},
            ),
            datasets.SplitGenerator(
                name="train_distant",
                gen_kwargs={"filepath": downloads["train_distant"], "rel_info": downloads["rel_info"]},
            ),
        ]

    def _generate_examples(self, filepath, rel_info):
        """Generate DocRED examples."""

        with open(rel_info, encoding="utf-8") as f:
            relation_name_map = json.load(f)
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)

        for idx, example in enumerate(data):

            # The test set ships without labels; predictions must be submitted to CodaLab for evaluation.
            if "labels" not in example.keys():
                example["labels"] = []

            for label in example["labels"]:
                # Rename and include full relation names
                label["relation_text"] = relation_name_map[label["r"]]
                label["relation_id"] = label["r"]
                label["head"] = label["h"]
                label["tail"] = label["t"]
                del label["r"]
                del label["h"]
                del label["t"]

            yield idx, example
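

if __name__ == "__main__":
    # Minimal usage sketch (illustrative only, not part of the loading script): it
    # assumes this file is saved as "docred.py", that the `datasets` library is
    # installed, and that the Google Drive URLs above are reachable. The exact
    # invocation may differ across `datasets` versions.
    dataset = datasets.load_dataset(__file__, split="train_annotated")

    example = dataset[0]
    print(example["title"])

    # "labels" is declared as a Sequence of a dict of features, so the loaded dataset
    # exposes it as a dict of parallel lists (one entry per annotated relation).
    labels = example["labels"]
    for head, tail, relation in zip(labels["head"], labels["tail"], labels["relation_text"]):
        print(head, relation, tail)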