# Lint as: python3
"""REBEL"""

from __future__ import absolute_import, division, print_function

import json
import logging
import os
import re

import datasets

_DESCRIPTION = """\
REBEL-Portuguese is a Portuguese adaptation of the REBEL dataset.
"""

_URL = "https://huggingface.co/datasets/ju-resplande/rebel-pt/resolve/main/rebel-pt.zip"
_LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License (CC BY-NC-SA 4.0)"
_CITATION = """\
    @inproceedings{huguet-cabot-navigli-2021-rebel,
    title = "REBEL: Relation Extraction By End-to-end Language generation",
    author = "Huguet Cabot, Pere-Llu{\'\i}s  and
      Navigli, Roberto",
    booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2021",
    month = nov,
    year = "2021",
    address = "Online and in the Barceló Bávaro Convention Centre, Punta Cana, Dominican Republic",
    publisher = "Association for Computational Linguistics",
    url = "https://github.com/Babelscape/rebel/blob/main/docs/EMNLP_2021_REBEL__Camera_Ready_.pdf",
    }
"""
_HOMEPAGE = "https://github.com/ju-resplande/crocodile"


class RebelConfig(datasets.BuilderConfig):
    """BuilderConfig for REBEL."""

    def __init__(self, **kwargs):
        """BuilderConfig for REBEL.
        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super(RebelConfig, self).__init__(**kwargs)


class Rebel(datasets.GeneratorBasedBuilder):
    """Rebel 1.0"""

    BUILDER_CONFIGS = [
        RebelConfig(
            name="REBEL",
            version=datasets.Version("1.0.0"),
            description=_DESCRIPTION,
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "context": datasets.Value("string"),
                    "triplets": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
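        # Use a user-supplied data_dir if given; otherwise download and extract the archive.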
        if self.config.data_dir:
            data_dir = self.config.data_dir
        else:
            data_dir = dl_manager.download_and_extract(_URL)

        return [
            datasets.SplitGenerator(name="pt", gen_kwargs={"filepath": os.path.join(data_dir, "pt.jsonl")}),
            # English splits from the original REBEL loading script, kept for reference:
            # datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": os.path.join(data_dir, "en_train.jsonl")}),
            # datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": os.path.join(data_dir, "en_val.jsonl")}),
            # datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": os.path.join(data_dir, "en_test.jsonl")}),
        ]

    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        logging.info("generating examples from = %s", filepath)

        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(f):
                article = json.loads(row)
                prev_len = 0
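                # Each article carries its raw text plus character-level 'entities'
                # and 'triples' annotations; articles without triples are skipped.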
                if len(article['triples']) == 0:
                    continue
                count = 0
                for text_paragraph in article['text'].split('\n'):
                    if len(text_paragraph) == 0:
                        continue
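                    # Split the paragraph into sentences on a period followed by whitespace.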
                    sentences = re.split(r'(?<=[.])\s', text_paragraph)
                    text = ''
                    for sentence in sentences:
                        text += sentence + ' '
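                        # If an entity mention straddles the end of the current text
                        # window, keep accumulating sentences so no mention is cut.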
                        if any(entity['boundaries'][0] < len(text) + prev_len < entity['boundaries'][1] for entity in article['entities']):
                            continue
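                        # Entities whose mention ends inside the current window, left to right.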
                        entities = sorted([entity for entity in article['entities'] if prev_len < entity['boundaries'][1] <= len(text) + prev_len], key=lambda tup: tup['boundaries'][0])
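                        # Linearize into the REBEL decoder format:
                        # <triplet> subject <subj> object <obj> relation ...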
                        decoder_output = '<triplet> '
                        for int_ent, entity in enumerate(entities):
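                            # Triplets headed by this entity whose subject and object
                            # both lie inside the current window, ordered by object start.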
                            triplets = sorted(
                                [triplet for triplet in article['triples']
                                 if triplet['subject'] == entity
                                 and prev_len < triplet['subject']['boundaries'][1] <= len(text) + prev_len
                                 and prev_len < triplet['object']['boundaries'][1] <= len(text) + prev_len],
                                key=lambda tup: tup['object']['boundaries'][0])
                            if len(triplets) == 0:
                                continue
                            decoder_output += entity['surfaceform'] + ' <subj> '
                            for triplet in triplets:
                                decoder_output += triplet['object']['surfaceform'] + ' <obj> ' + triplet['predicate']['surfaceform'] + ' <subj> '
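                            # Drop the trailing ' <subj> ' after the last object-relation pair.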
                            decoder_output = decoder_output[:-len(' <subj> ')]
                            decoder_output += ' <triplet> '
                        decoder_output = decoder_output[:-len(' <triplet> ')]
                        count += 1
                        prev_len += len(text)

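                        # No triplet fell fully inside this window: reset and move on.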
                        if len(decoder_output) == 0:
                            text = ''
                            continue

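                        # Space out punctuation and collapse repeated whitespace.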
                        text = re.sub(r'([\[\].,!?()])', r' \1 ', text.replace('()', ''))
                        text = re.sub(r'\s{2,}', ' ', text)

                        yield article['docid'] + '-' + str(count), {
                            "title": article['title'],
                            "context": text,
                            "id": article['uri'] + '-' + str(count),
                            "triplets": decoder_output,
                        }
                        text = ''
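

if __name__ == "__main__":
    # Minimal smoke test (a sketch, not part of the original loader): assumes a
    # `datasets` version that still supports loading from a local script and
    # network access to fetch the archive at _URL.
    rebel_pt = datasets.load_dataset(__file__, split="pt")
    print(rebel_pt[0]["triplets"])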