Shaltiel committed
Commit 014f409
1 Parent(s): 852366d

Upload BertForSyntaxParsing.py

Files changed (1)
  1. BertForSyntaxParsing.py +279 -0
BertForSyntaxParsing.py ADDED
@@ -0,0 +1,279 @@
+import math
+from transformers.utils import ModelOutput
+import torch
+from torch import nn
+from typing import List, Tuple, Optional, Union
+from dataclasses import dataclass
+from transformers import BertPreTrainedModel, BertModel, BertTokenizerFast
+
+ALL_FUNCTION_LABELS = ["nsubj", "punct", "mark", "case", "fixed", "obl", "det", "amod", "acl:relcl", "nmod", "cc", "conj", "root", "compound", "cop", "compound:affix", "advmod", "nummod", "appos", "nsubj:pass", "nmod:poss", "xcomp", "obj", "aux", "parataxis", "advcl", "ccomp", "csubj", "acl", "obl:tmod", "csubj:pass", "dep", "dislocated", "nmod:tmod", "nmod:npmod", "flat", "obl:npmod", "goeswith", "reparandum", "orphan", "list", "discourse", "iobj", "vocative", "expl", "flat:name"]
+
+@dataclass
+class SyntaxLogitsOutput(ModelOutput):
+    dependency_logits: torch.FloatTensor = None
+    function_logits: torch.FloatTensor = None
+    dependency_head_indices: torch.LongTensor = None
+
+    def detach(self):
+        return SyntaxLogitsOutput(self.dependency_logits.detach(), self.function_logits.detach(), self.dependency_head_indices.detach())
+
+@dataclass
+class SyntaxTaggingOutput(ModelOutput):
+    loss: Optional[torch.FloatTensor] = None
+    logits: Optional[SyntaxLogitsOutput] = None
+    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+    attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+@dataclass
+class SyntaxLabels(ModelOutput):
+    dependency_labels: Optional[torch.LongTensor] = None
+    function_labels: Optional[torch.LongTensor] = None
+
+    def detach(self):
+        return SyntaxLabels(self.dependency_labels.detach(), self.function_labels.detach())
+
+    def to(self, device):
+        return SyntaxLabels(self.dependency_labels.to(device), self.function_labels.to(device))
+
+class BertSyntaxParsingHead(nn.Module):
+    def __init__(self, config):
+        super().__init__()
+        self.config = config
+
+        # the attention query & key values
+        self.head_size = config.syntax_head_size  # int(config.hidden_size / config.num_attention_heads * 2)
+        self.query = nn.Linear(config.hidden_size, self.head_size)
+        self.key = nn.Linear(config.hidden_size, self.head_size)
+        # the function classifier gets two encoding values and predicts the labels
+        self.num_function_classes = len(ALL_FUNCTION_LABELS)
+        self.cls = nn.Linear(config.hidden_size * 2, self.num_function_classes)
+
+    def forward(
+            self,
+            hidden_states: torch.Tensor,
+            extended_attention_mask: Optional[torch.Tensor],
+            labels: Optional[SyntaxLabels] = None,
+            compute_mst: bool = False) -> Tuple[torch.Tensor, SyntaxLogitsOutput]:
+
+        # Take the dot product between "query" and "key" to get the raw attention scores.
+        query_layer = self.query(hidden_states)
+        key_layer = self.key(hidden_states)
+        attention_scores = torch.bmm(query_layer, key_layer.transpose(-1, -2)) / math.sqrt(self.head_size)
+
+        # add in the attention mask
+        if extended_attention_mask is not None:
+            if extended_attention_mask.ndim == 4:
+                extended_attention_mask = extended_attention_mask.squeeze(1)
+            attention_scores += extended_attention_mask  # batch x seq x seq
+
+        # At this point take the hidden_state of the word and of the dependency word, and predict the function
+        # If labels are provided, use the labels.
+        if self.training and labels is not None:
+            # Note that the labels can have -100, so just set those to zero with a max
+            dep_indices = labels.dependency_labels.clamp_min(0)
+        # Otherwise - check if the caller wants the MST or just the argmax
+        elif compute_mst:
+            dep_indices = compute_mst_tree(attention_scores)
+        else:
+            dep_indices = torch.argmax(attention_scores, dim=-1)
+
+        # After we retrieve the dependency indices, create a tensor of the batch indices and retrieve the vectors of the heads to calculate the function
+        batch_indices = torch.arange(dep_indices.size(0)).view(-1, 1).expand(-1, dep_indices.size(1)).to(dep_indices.device)
+        dep_vectors = hidden_states[batch_indices, dep_indices, :]  # batch x seq x dim
+
+        # concatenate that with the last hidden states, and send to the classifier output
+        cls_inputs = torch.cat((hidden_states, dep_vectors), dim=-1)
+        function_logits = self.cls(cls_inputs)
+
+        loss = None
+        if labels is not None:
+            loss_fct = nn.CrossEntropyLoss()
+            # step 1: dependency scores loss - this is applied to the attention scores
+            loss = loss_fct(attention_scores.view(-1, hidden_states.size(-2)), labels.dependency_labels.view(-1))
+            # step 2: function loss
+            loss += loss_fct(function_logits.view(-1, self.num_function_classes), labels.function_labels.view(-1))
+
+        return (loss, SyntaxLogitsOutput(attention_scores, function_logits, dep_indices))
+
+
+class BertForSyntaxParsing(BertPreTrainedModel):
+
+    def __init__(self, config):
+        super().__init__(config)
+
+        self.bert = BertModel(config, add_pooling_layer=False)
+        self.dropout = nn.Dropout(config.hidden_dropout_prob)
+        self.syntax = BertSyntaxParsingHead(config)
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    def forward(
+            self,
+            input_ids: Optional[torch.Tensor] = None,
+            attention_mask: Optional[torch.Tensor] = None,
+            token_type_ids: Optional[torch.Tensor] = None,
+            position_ids: Optional[torch.Tensor] = None,
+            labels: Optional[SyntaxLabels] = None,
+            head_mask: Optional[torch.Tensor] = None,
+            inputs_embeds: Optional[torch.Tensor] = None,
+            output_attentions: Optional[bool] = None,
+            output_hidden_states: Optional[bool] = None,
+            return_dict: Optional[bool] = None,
+            compute_syntax_mst: Optional[bool] = None,
+    ):
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        bert_outputs = self.bert(
+            input_ids,
+            attention_mask=attention_mask,
+            token_type_ids=token_type_ids,
+            position_ids=position_ids,
+            head_mask=head_mask,
+            inputs_embeds=inputs_embeds,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+
+        extended_attention_mask = None
+        if attention_mask is not None:
+            extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_ids.size())
+        # apply the syntax head
+        loss, logits = self.syntax(self.dropout(bert_outputs[0]), extended_attention_mask, labels, compute_syntax_mst)
+
+        if not return_dict:
+            return (loss, (logits.dependency_logits, logits.function_logits)) + bert_outputs[2:]
+
+        return SyntaxTaggingOutput(
+            loss=loss,
+            logits=logits,
+            hidden_states=bert_outputs.hidden_states,
+            attentions=bert_outputs.attentions,
+        )
+
+    def predict(self, sentences: Union[str, List[str]], tokenizer: BertTokenizerFast, compute_mst=True):
+        if isinstance(sentences, str):
+            sentences = [sentences]
+
+        # predict the logits for the sentences
+        inputs = tokenizer(sentences, padding='longest', truncation=True, return_tensors='pt')
+        inputs = {k: v.to(self.device) for k, v in inputs.items()}
+        logits = self.forward(**inputs, return_dict=True, compute_syntax_mst=compute_mst).logits
+
+        outputs = []
+        for sent_idx in range(len(sentences)):
+            deps = logits.dependency_head_indices[sent_idx].tolist()
+            funcs = logits.function_logits.argmax(-1)[sent_idx].tolist()
+            toks = tokenizer.convert_ids_to_tokens(inputs['input_ids'][sent_idx])[1:-1]  # ignore cls and sep
+
+            # first, go through the tokens and create a mapping between each token index and the index of the
+            # whole word it belongs to once the wordpieces are merged back together
+            idx_mapping = {-1: -1}  # default root
+            real_idx = -1
+            for i in range(len(toks)):
+                if not toks[i].startswith('##'):
+                    real_idx += 1
+                idx_mapping[i] = real_idx
+
+            # build our tree, keeping track of the root idx
+            tree = []
+            root_idx = 0
+            for i in range(len(toks)):
+                if toks[i].startswith('##'):
+                    tree[-1]['word'] += toks[i][2:]
+                    continue
+
+                dep_idx = deps[i + 1] - 1  # add 1 to skip the cls token, then subtract 1 from the predicted head to remove it again
+                dep_head = 'root' if dep_idx == -1 else toks[dep_idx]
+                dep_func = ALL_FUNCTION_LABELS[funcs[i + 1]]
+
+                if dep_head == 'root': root_idx = len(tree)
+                tree.append(dict(word=toks[i], dep_head_idx=idx_mapping[dep_idx], dep_head=dep_head, dep_func=dep_func))
+            outputs.append(dict(tree=tree, root_idx=root_idx))
+        return outputs
+
+
+def compute_mst_tree(attention_scores: torch.Tensor):
+    # attention scores should be 3 dimensions - batch x seq x seq (if it is 2 - just unsqueeze)
+    if attention_scores.ndim == 2: attention_scores = attention_scores.unsqueeze(0)
+    if attention_scores.ndim != 3 or attention_scores.shape[1] != attention_scores.shape[2]:
+        raise ValueError(f'Expected attention scores to be of shape batch x seq x seq, instead got {attention_scores.shape}')
+
+    batch_size, seq_len, _ = attention_scores.shape
+    # start by softmaxing so the scores are comparable
+    attention_scores = attention_scores.softmax(dim=-1)
+
+    # set the values for the CLS and SEP rows to all be very low, so they never get chosen as a replacement arc
+    attention_scores[:, 0, :] = -10000
+    attention_scores[:, -1, :] = -10000
+    attention_scores[:, :, -1] = -10000  # can never predict sep
+
+    # find the root, and make its score very high so we never have a conflict
+    root_cands = torch.argsort(attention_scores[:, :, 0], dim=-1)
+    batch_indices = torch.arange(batch_size, device=root_cands.device)
+    attention_scores[batch_indices.unsqueeze(1), root_cands, 0] = -10000
+    attention_scores[batch_indices, root_cands[:, -1], 0] = 10000
+
+    # we start by getting the argmax for each score, and then computing the cycles and contracting them
+    sorted_indices = torch.argsort(attention_scores, dim=-1, descending=True)
+    indices = sorted_indices[:, :, 0].clone()  # take the argmax
+
+    # go through each batch item and make sure our tree works
+    for batch_idx in range(batch_size):
+        # We have one root - detect the cycles and contract them. A cycle can never contain the root, so
+        # for every cycle we look at all of its nodes, pick the highest-scoring arc that leaves the cycle, and redirect that node's head
+        has_cycle, cycle_nodes = detect_cycle(indices[batch_idx])
+        while has_cycle:
+            base_idx, head_idx = choose_contracting_arc(indices[batch_idx], sorted_indices[batch_idx], cycle_nodes, attention_scores[batch_idx])
+            indices[batch_idx, base_idx] = head_idx
+            # find the next cycle
+            has_cycle, cycle_nodes = detect_cycle(indices[batch_idx])
+
+    return indices
+
+def detect_cycle(indices: torch.LongTensor):
+    # Simple cycle detection algorithm
+    # Returns a boolean indicating if a cycle is detected and the nodes involved in the cycle
+    visited = set()
+    for node in range(1, len(indices) - 1):  # ignore the CLS/SEP tokens
+        if node in visited:
+            continue
+        current_path = set()
+        while node not in visited:
+            visited.add(node)
+            current_path.add(node)
+            node = indices[node].item()
+            if node == 0: break  # roots never point to anything
+            if node in current_path:
+                return True, current_path  # Cycle detected
+    return False, None
+
+def choose_contracting_arc(indices: torch.LongTensor, sorted_indices: torch.LongTensor, cycle_nodes: set, scores: torch.FloatTensor):
+    # Chooses the highest-scoring, non-cycling arc from a graph. Iterates through 'cycle_nodes' to find
+    # the best arc based on 'scores', avoiding cycles and zero node connections.
+    # For each node, we only look at the next highest scoring non-cycling arc
+    best_base_idx, best_head_idx = -1, -1
+    score = float('-inf')
+
+    # convert the indices to a list once, to avoid multiple conversions (saves a few seconds)
+    currents = indices.tolist()
+    for base_node in cycle_nodes:
+        # we don't want to take anything that has a higher score than the current value - we can end up in an endless loop
+        # Since the indices are sorted, as soon as we find our current item, we can move on to the next.
+        current = currents[base_node]
+        found_current = False
+
+        for head_node in sorted_indices[base_node].tolist():
+            if head_node == current:
+                found_current = True
+                continue
+            if not found_current or head_node in cycle_nodes or head_node == 0:
+                continue
+
+            current_score = scores[base_node, head_node].item()
+            if current_score > score:
+                best_base_idx, best_head_idx, score = base_node, head_node, current_score
+            break
+
+    return best_base_idx, best_head_idx
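
For reference, a minimal usage sketch of the module added in this commit (not part of the committed file). It assumes the hosting repository maps AutoModel to BertForSyntaxParsing via trust_remote_code, and the checkpoint name below is a placeholder for whichever model ships this file.

# Usage sketch (assumption: placeholder checkpoint name, auto_map configured
# so that AutoModel with trust_remote_code=True returns BertForSyntaxParsing).
from transformers import AutoModel, AutoTokenizer
import torch

checkpoint = 'dicta-il/dictabert-parse'  # placeholder checkpoint name
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModel.from_pretrained(checkpoint, trust_remote_code=True)
model.eval()

# predict() accepts a single string or a list of strings and returns, per sentence,
# a dict with a word-level dependency tree and the index of the root word.
parsed = model.predict('Example sentence to parse.', tokenizer, compute_mst=True)
for entry in parsed[0]['tree']:
    print(entry['word'], entry['dep_head_idx'], entry['dep_func'])

# The greedy cycle-breaking helper can also be exercised on its own, using a random
# score matrix of shape batch x seq x seq (CLS assumed at index 0, SEP last):
from BertForSyntaxParsing import compute_mst_tree
heads = compute_mst_tree(torch.randn(1, 6, 6))
print(heads.shape)  # torch.Size([1, 6]) - one predicted head index per position

Passing compute_mst=True routes head selection through compute_mst_tree instead of a plain per-token argmax, so cycles in the predicted head assignments are detected and broken before the tree is assembled.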