arubenruben committed on
Commit
e669eb7
1 Parent(s): 3cf6699

commit files to HF hub

Browse files
Files changed (4) hide show
  1. config.json +1 -1
  2. special_tokens_map.json +35 -5
  3. srl.py +3 -11
  4. tokenizer_config.json +1 -0
config.json CHANGED
@@ -1,5 +1,5 @@
1
  {
2
- "_name_or_path": "neuralmind/bert-large-portuguese-cased",
3
  "architectures": [
4
  "BertForTokenClassification"
5
  ],
 
1
  {
2
+ "_name_or_path": "liaad/propbank_br_srl_bert_large_portuguese_cased",
3
  "architectures": [
4
  "BertForTokenClassification"
5
  ],
special_tokens_map.json CHANGED
@@ -1,7 +1,37 @@
1
  {
2
- "cls_token": "[CLS]",
3
- "mask_token": "[MASK]",
4
- "pad_token": "[PAD]",
5
- "sep_token": "[SEP]",
6
- "unk_token": "[UNK]"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
  }
 
1
  {
2
+ "cls_token": {
3
+ "content": "[CLS]",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "mask_token": {
10
+ "content": "[MASK]",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "[PAD]",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "sep_token": {
24
+ "content": "[SEP]",
25
+ "lstrip": false,
26
+ "normalized": false,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ },
30
+ "unk_token": {
31
+ "content": "[UNK]",
32
+ "lstrip": false,
33
+ "normalized": false,
34
+ "rstrip": false,
35
+ "single_word": false
36
+ }
37
  }
srl.py CHANGED
@@ -38,17 +38,7 @@ class SRLPipeline(Pipeline):
38
  current_word = word_id
39
  label = -100 if word_id is None else labels[word_id]
40
  new_labels.append(label)
41
- elif word_id is None:
42
- # Special token
43
- new_labels.append(-100)
44
  else:
45
- """
46
- # Same word as previous token
47
- label = labels[word_id]
48
- # If the label is B-XXX we change it to I-XXX
49
- if label % 2 == 1:
50
- label += 1
51
- """
52
  new_labels.append(-100)
53
 
54
  results.append(new_labels)
@@ -121,8 +111,10 @@ class SRLPipeline(Pipeline):
121
  if label != -100:
122
  true_predictions.append(self.label_names[prediction])
123
 
 
 
124
  outputs.append({
125
- "tokens": self.text.split(),
126
  "predictions": true_predictions,
127
  "verb": self.verbs[i]
128
  })
 
38
  current_word = word_id
39
  label = -100 if word_id is None else labels[word_id]
40
  new_labels.append(label)
 
 
 
41
  else:
 
 
 
 
 
 
 
42
  new_labels.append(-100)
43
 
44
  results.append(new_labels)
 
111
  if label != -100:
112
  true_predictions.append(self.label_names[prediction])
113
 
114
+ doc = self.nlp(self.text.strip())
115
+
116
  outputs.append({
117
+ "tokens": [token.text for token in doc],
118
  "predictions": true_predictions,
119
  "verb": self.verbs[i]
120
  })
tokenizer_config.json CHANGED
@@ -1,4 +1,5 @@
1
  {
 
2
  "added_tokens_decoder": {
3
  "0": {
4
  "content": "[PAD]",
 
1
  {
2
+ "add_prefix_space": true,
3
  "added_tokens_decoder": {
4
  "0": {
5
  "content": "[PAD]",