PereLluis13 committed on
Commit
171f296
1 Parent(s): bdf812c
Files changed (3)
  1. .gitattributes +1 -0
  2. rebel.py +150 -0
  3. rebel_dataset.zip +3 -0
.gitattributes CHANGED
@@ -25,3 +25,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zstandard filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ rebel_dataset.zip filter=lfs diff=lfs merge=lfs -text
rebel.py ADDED
@@ -0,0 +1,150 @@
+ # Lint as: python3
+ """REBEL"""
+
+ from __future__ import absolute_import, division, print_function
+
+ import datasets
+
+ import re
+ import json
+ import logging
+
+ _DESCRIPTION = """\
+ REBEL is a silver dataset created for the paper REBEL: Relation Extraction By End-to-end Language generation
+ """
+
+ _URL = "https://huggingface.co/datasets/Babelscape/rebel-dataset/resolve/main/rebel_dataset.zip"
+ _URLS = {
+     "train": _URL + "en_train.jsonl",
+     "dev": _URL + "en_val.jsonl",
+     "test": _URL + "en_test.jsonl",
+ }
+ _LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License (CC BY-NC-SA 4.0)"
+ _CITATION = """\
+
+ @inproceedings{huguet-cabot-navigli-2021-rebel,
+
+     title = "REBEL: Relation Extraction By End-to-end Language generation",
+
+     author = "Huguet Cabot, Pere-Llu{\'\i}s and
+
+       Navigli, Roberto",
+
+     booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2021",
+
+     month = nov,
+
+     year = "2021",
+
+     address = "Online and in the Barceló Bávaro Convention Centre, Punta Cana, Dominican Republic",
+
+     publisher = "Association for Computational Linguistics",
+
+     url = "https://github.com/Babelscape/rebel/blob/main/docs/EMNLP_2021_REBEL__Camera_Ready_.pdf",
+
+ }
+
+ """
+ _HOMEPAGE = "https://github.com/Babelscape/rebel"
+
+
+ class RebelConfig(datasets.BuilderConfig):
+     """BuilderConfig for REBEL."""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for REBEL.
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(RebelConfig, self).__init__(**kwargs)
+
+
+ class Rebel(datasets.GeneratorBasedBuilder):
+     """Rebel 1.0"""
+
+     BUILDER_CONFIGS = [
+         RebelConfig(
+             name="REBEL",
+             version=datasets.Version("1.0.0"),
+             description=_DESCRIPTION,
+         ),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "title": datasets.Value("string"),
+                     "context": datasets.Value("string"),
+                     "triplets": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+             license=_LICENSE,
+         )
+
+     def _split_generators(self, dl_manager):
+         if self.config.data_dir:
+             data_dir = self.config.data_dir
+         else:
+             data_dir = dl_manager.download_and_extract(_URL)
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir + "/en_train.jsonl"}),
+             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_dir + "/en_val.jsonl"}),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_dir + "/en_test.jsonl"}),
+         ]
+
+     def _generate_examples(self, filepath):
+         """This function returns the examples in the raw (text) form."""
+         logging.info("generating examples from = %s", filepath)
+
+         with open(filepath, encoding="utf-8") as f:
+             for id_, row in enumerate(f):
+                 article = json.loads(row)
+                 prev_len = 0
+                 if len(article['triples']) == 0:
+                     continue
+                 count = 0
+                 for text_paragraph in article['text'].split('\n'):
+                     if len(text_paragraph) == 0:
+                         continue
+                     sentences = re.split(r'(?<=[.])\s', text_paragraph)
+                     text = ''
+                     for sentence in sentences:
+                         text += sentence + ' '
+                         if any([entity['boundaries'][0] < len(text) + prev_len < entity['boundaries'][1] for entity in article['entities']]):
+                             continue
+                         entities = sorted([entity for entity in article['entities'] if prev_len < entity['boundaries'][1] <= len(text)+prev_len], key=lambda tup: tup['boundaries'][0])
+                         decoder_output = '<triplet> '
+                         for int_ent, entity in enumerate(entities):
+                             triplets = sorted([triplet for triplet in article['triples'] if triplet['subject'] == entity and prev_len< triplet['subject']['boundaries'][1]<=len(text) + prev_len and prev_len< triplet['object']['boundaries'][1]<=len(text)+ prev_len], key=lambda tup: tup['object']['boundaries'][0])
+                             if len(triplets) == 0:
+                                 continue
+                             decoder_output += entity['surfaceform'] + ' <subj> '
+                             for triplet in triplets:
+                                 decoder_output += triplet['object']['surfaceform'] + ' <obj> ' + triplet['predicate']['surfaceform'] + ' <subj> '
+                             decoder_output = decoder_output[:-len(' <subj> ')]
+                             decoder_output += ' <triplet> '
+                         decoder_output = decoder_output[:-len(' <triplet> ')]
+                         count += 1
+                         prev_len += len(text)
+
+                         if len(decoder_output) == 0:
+                             text = ''
+                             continue
+
+                         text = re.sub('([\[\].,!?()])', r' \1 ', text.replace('()', ''))
+                         text = re.sub('\s{2,}', ' ', text)
+
+                         yield article['uri'] + '-' + str(count), {
+                             "title": article['title'],
+                             "context": text,
+                             "id": article['uri'] + '-' + str(count),
+                             "triplets": decoder_output,
+                         }
+                         text = ''
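
A minimal usage sketch, not part of this commit: it assumes the repo is published on the Hub as Babelscape/rebel-dataset (the path _URL above already points there) and that recent versions of the datasets library may ask for trust_remote_code=True when running script-based datasets. The parse_triplets helper is hypothetical; it simply inverts the <triplet>/<subj>/<obj> linearization that _generate_examples builds in decoder_output.

from datasets import load_dataset

# Assumption: loading the script straight from the Hub; the first call downloads
# and extracts rebel_dataset.zip (~1.5 GB) exactly as _split_generators does.
dataset = load_dataset("Babelscape/rebel-dataset")
sample = dataset["train"][0]
print(sample["title"], sample["context"])

# Hypothetical helper: recover (subject, object, relation) tuples from the
# linearized string "<triplet> subject <subj> object <obj> relation <subj> ...".
def parse_triplets(linearized):
    triples = []
    for chunk in linearized.split("<triplet>"):
        if not chunk.strip():
            continue
        subject, _, rest = chunk.partition("<subj>")
        for pair in rest.split("<subj>"):
            obj, _, relation = pair.partition("<obj>")
            if obj.strip() and relation.strip():
                triples.append((subject.strip(), obj.strip(), relation.strip()))
    return triples

print(parse_triplets(sample["triplets"]))

Note that context and triplets are plain strings, matching the Features declared in _info, so any structured view of the triples has to be reconstructed from the linearization as sketched above.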
rebel_dataset.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3e5a84a32b604b80617acc868ae3437e00f07e88244d72fbda44ccfbb3989980
+ size 1490017445
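
The three lines above are the Git LFS pointer stored under the new .gitattributes rule; the actual file is the zip identified by the sha256 oid, 1490017445 bytes. A minimal sketch, assuming the repo id Babelscape/rebel-dataset, of fetching the resolved archive directly with huggingface_hub instead of going through the loading script:

from huggingface_hub import hf_hub_download

# Downloads the LFS-resolved archive (size per the pointer above), not the
# 3-line pointer text; the loading script's dl_manager resolves the same _URL.
zip_path = hf_hub_download(
    repo_id="Babelscape/rebel-dataset",
    filename="rebel_dataset.zip",
    repo_type="dataset",
)
print(zip_path)  # local cache path of rebel_dataset.zip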