ju-resplande committed on
Commit
4fb7cb6
1 Parent(s): 98c930d

dataset builder

Browse files
Files changed (1) hide show
  1. rebel-pt.py +134 -0
rebel-pt.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Lint as: python3
2
+ """REBEL"""
3
+
4
+ from __future__ import absolute_import, division, print_function
5
+
6
+ import datasets
7
+ import os
8
+ import re
9
+ import json
10
+ import logging
11
+
12
# Human-readable summary shown on the dataset hub page.
_DESCRIPTION = """\
REBEL-Portuguese is a REBEL adaptation for Portuguese.
"""

# Zip archive containing the single `pt.jsonl` data file.
_URL = "https://huggingface.co/datasets/ju-resplande/rebel-pt/resolve/main/pt.zip"

_LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License (CC BY-NC-SA 4.0)"

# BibTeX entry for the original REBEL paper.  Kept as a raw string so the
# LaTeX escapes ({\'\i}) survive verbatim: in the previous non-raw literal
# "\'" collapsed to "'" (corrupting the author name) and "\i" only survived
# as an unrecognized string escape (DeprecationWarning on modern Pythons).
_CITATION = r"""@inproceedings{huguet-cabot-navigli-2021-rebel,
    title = "REBEL: Relation Extraction By End-to-end Language generation",
    author = "Huguet Cabot, Pere-Llu{\'\i}s and
      Navigli, Roberto",
    booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2021",
    month = nov,
    year = "2021",
    address = "Online and in the Barceló Bávaro Convention Centre, Punta Cana, Dominican Republic",
    publisher = "Association for Computational Linguistics",
    url = "https://github.com/Babelscape/rebel/blob/main/docs/EMNLP_2021_REBEL__Camera_Ready_.pdf",
}
"""

_HOMEPAGE = "https://github.com/ju-resplande/crocodile"
32
+
33
+
34
class RebelConfig(datasets.BuilderConfig):
    """Configuration for the REBEL dataset builder.

    A thin pass-through wrapper: it adds no options of its own and exists
    only so the builder can declare named configs of its own type.
    """

    def __init__(self, **kwargs):
        """Initialize the config, forwarding every keyword argument.

        Args:
            **kwargs: keyword arguments forwarded unchanged to
                :class:`datasets.BuilderConfig`.
        """
        super().__init__(**kwargs)
43
+
44
+
45
class Rebel(datasets.GeneratorBasedBuilder):
    """Rebel 1.0"""
    # Builder for the Portuguese REBEL corpus: downloads `pt.zip`, reads the
    # contained `pt.jsonl`, and linearizes each article's entity/relation
    # annotations into `<triplet> ... <subj> ... <obj> ...` target strings.

    BUILDER_CONFIGS = [
        RebelConfig(
            name="REBEL",
            version=datasets.Version("1.0.0"),
            description=_DESCRIPTION,
        ),
    ]

    def _info(self):
        """Return dataset metadata; every feature is a plain string."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "context": datasets.Value("string"),
                    # Linearized relations: a single tagged string built in
                    # `_generate_examples`, not structured data.
                    "triplets": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Resolve the data directory and declare the single 'pt' split.

        A user-supplied ``data_dir`` (``load_dataset(..., data_dir=...)``)
        takes precedence; otherwise `_URL` is downloaded and extracted.
        """
        if self.config.data_dir:
            data_dir = self.config.data_dir
        else:
            data_dir = dl_manager.download_and_extract(_URL)

        return [
            datasets.SplitGenerator(name='pt', gen_kwargs={"filepath": os.path.join(data_dir, "pt.jsonl")})
            # Upstream (English) REBEL ships pre-made train/val/test splits;
            # kept here, commented out, for reference.
            #datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": os.path.join(data_dir, "en_train.jsonl")}),
            #datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": os.path.join(data_dir,"en_val.jsonl")}),
            #datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": os.path.join(data_dir,"en_test.jsonl")}),
        ]

    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        # Each JSONL row is one article; from the accesses below it must carry
        # 'text', 'entities' (each with char 'boundaries' and 'surfaceform'),
        # 'triples' (subject/predicate/object records), 'docid', 'uri' and
        # 'title' -- assumed from usage; confirm against the crocodile dump.
        logging.info("generating examples from = %s", filepath)

        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(f):  # id_ (line index) is unused
                article = json.loads(row)
                prev_len = 0  # absolute char offset where the current chunk starts
                if len(article['triples']) == 0:
                    continue  # nothing to linearize for this article
                count = 0  # chunk counter, used to suffix example ids
                for text_paragraph in article['text'].split('\n'):
                    if len(text_paragraph) == 0:
                        continue
                    # Naive sentence split: whitespace following a period.
                    sentences = re.split(r'(?<=[.])\s', text_paragraph)
                    text = ''
                    for sentence in sentences:
                        text += sentence + ' '
                        # Keep accumulating sentences while any entity span
                        # straddles the current cut point (starts before it,
                        # ends after it); emitting now would truncate it.
                        if any([entity['boundaries'][0] < len(text) + prev_len < entity['boundaries'][1] for entity in article['entities']]):
                            continue
                        # Entities whose span ends inside this chunk, ordered
                        # by start offset.
                        entities = sorted([entity for entity in article['entities'] if prev_len < entity['boundaries'][1] <= len(text)+prev_len], key=lambda tup: tup['boundaries'][0])
                        decoder_output = '<triplet> '
                        for int_ent, entity in enumerate(entities):
                            # Relations whose subject is this entity and whose
                            # subject and object both end inside the chunk,
                            # ordered by object start offset.
                            triplets = sorted([triplet for triplet in article['triples'] if triplet['subject'] == entity and prev_len< triplet['subject']['boundaries'][1]<=len(text) + prev_len and prev_len< triplet['object']['boundaries'][1]<=len(text)+ prev_len], key=lambda tup: tup['object']['boundaries'][0])
                            if len(triplets) == 0:
                                continue
                            # Linearization per subject:
                            #   subj <subj> obj1 <obj> rel1 <subj> obj2 <obj> rel2 ...
                            decoder_output += entity['surfaceform'] + ' <subj> '
                            for triplet in triplets:
                                decoder_output += triplet['object']['surfaceform'] + ' <obj> ' + triplet['predicate']['surfaceform'] + ' <subj> '
                            decoder_output = decoder_output[:-len(' <subj> ')]  # drop dangling separator
                            decoder_output += ' <triplet> '
                        decoder_output = decoder_output[:-len(' <triplet> ')]  # drop trailing tag; empties the string when no entity matched
                        count += 1
                        prev_len += len(text)  # advance the offset even if the chunk is skipped below

                        if len(decoder_output) == 0:
                            # No relation fell entirely inside this chunk.
                            text = ''
                            continue

                        # Pad punctuation with spaces and collapse whitespace
                        # runs so the context tokenizes cleanly.
                        # NOTE(review): non-raw literals -- '\s' reaches the
                        # regex engine only as an unrecognized string escape;
                        # raw strings would be safer.
                        text = re.sub('([\[\].,!?()])', r' \1 ', text.replace('()', ''))
                        text = re.sub('\s{2,}', ' ', text)

                        yield article['docid'] + '-' + str(count), {
                            "title": article['title'],
                            "context": text,
                            "id": article['uri'] + '-' + str(count),
                            "triplets": decoder_output,
                        }
                        text = ''  # start a fresh chunk after emitting