asahi417 committed
Commit 1b48831
1 Parent(s): d48c484

fix dataset

Files changed (2)
  1. README.md +2 -4
  2. process/tweet_ner.py +62 -0
README.md CHANGED
@@ -62,14 +62,12 @@ The data fields are the same among all splits.
 #### tweet_qa
 - `text`: a `string` feature.
 - `gold_label_str`: a `string` feature.
-- `paragraph`: a `string` feature.
-- `question`: a `string` feature.
+- `context`: a `string` feature.
 
 #### tweet_qg
 - `text`: a `string` feature.
 - `gold_label_str`: a `string` feature.
-- `paragraph`: a `string` feature.
-- `question`: a `string` feature.
+- `context`: a `string` feature.
 
 #### tweet_intimacy
 - `text`: a `string` feature.
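For orientation, a minimal sketch of the record shape the updated field list describes for tweet_qa (tweet_qg is analogous); the field values below are invented placeholders, not data taken from the dataset:

# Hypothetical tweet_qa record under the updated schema: three string fields,
# with a single `context` field replacing the former `paragraph` and `question`.
example = {
    "text": "example question text",
    "gold_label_str": "example answer",
    "context": "example tweet providing the context",
}
assert all(isinstance(v, str) for v in example.values())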
process/tweet_ner.py CHANGED
@@ -1,7 +1,66 @@
 import os
 import json
+from typing import List
+from pprint import pprint
 from datasets import load_dataset
 
+label2id = {
+    "B-corporation": 0,
+    "B-creative_work": 1,
+    "B-event": 2,
+    "B-group": 3,
+    "B-location": 4,
+    "B-person": 5,
+    "B-product": 6,
+    "I-corporation": 7,
+    "I-creative_work": 8,
+    "I-event": 9,
+    "I-group": 10,
+    "I-location": 11,
+    "I-person": 12,
+    "I-product": 13,
+    "O": 14
+}
+
+
+def decode_ner_tags(tag_sequence: List, input_sequence: List):
+    """ decode ner tag sequence """
+    def update_collection(_tmp_entity, _tmp_entity_type, _tmp_pos, _out):
+        if len(_tmp_entity) != 0 and _tmp_entity_type is not None:
+            _out.append({'type': _tmp_entity_type, 'entity': _tmp_entity, 'position': _tmp_pos})
+            _tmp_entity = []
+            _tmp_entity_type = None
+        return _tmp_entity, _tmp_entity_type, _tmp_pos, _out
+
+    assert len(tag_sequence) == len(input_sequence), str([len(tag_sequence), len(input_sequence)])
+    out = []
+    tmp_entity = []
+    tmp_pos = []
+    tmp_entity_type = None
+    for n, (_l, _i) in enumerate(zip(tag_sequence, input_sequence)):
+        if _l.startswith('B-'):
+            _, _, _, out = update_collection(tmp_entity, tmp_entity_type, tmp_pos, out)
+            tmp_entity_type = '-'.join(_l.split('-')[1:])
+            tmp_entity = [_i]
+            tmp_pos = [n]
+        elif _l.startswith('I-'):
+            tmp_tmp_entity_type = '-'.join(_l.split('-')[1:])
+            if len(tmp_entity) == 0:
+                # if 'I' does not follow a 'B', skip it
+                tmp_entity, tmp_entity_type, tmp_pos, out = update_collection(tmp_entity, tmp_entity_type, tmp_pos, out)
+            elif tmp_tmp_entity_type != tmp_entity_type:
+                # if the type does not match the preceding 'B', skip it
+                tmp_entity, tmp_entity_type, tmp_pos, out = update_collection(tmp_entity, tmp_entity_type, tmp_pos, out)
+            else:
+                tmp_entity.append(_i)
+                tmp_pos.append(n)
+        elif _l == 'O':
+            tmp_entity, tmp_entity_type, tmp_pos, out = update_collection(tmp_entity, tmp_entity_type, tmp_pos, out)
+        else:
+            raise ValueError('unknown tag: {}'.format(_l))
+    _, _, _, out = update_collection(tmp_entity, tmp_entity_type, tmp_pos, out)
+    return out
+
 os.makedirs("data/tweet_ner7", exist_ok=True)
 data = load_dataset("tner/tweetner7")
 
@@ -10,6 +69,9 @@ def process(tmp):
     tmp = [i.to_dict() for _, i in tmp.iterrows()]
     for i in tmp:
         i.pop("id")
+        entities = decode_ner_tags(i['tags'].tolist(), i['tokens'].tolist())
+        pprint(entities)
+        input()
         i['gold_label_sequence'] = i.pop('tags').tolist()
        i['text_tokenized'] = i.pop('tokens').tolist()
         i['text'] = ' '.join(i['text_tokenized'])
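For reference, a minimal sketch of how the new decode_ner_tags helper behaves on a toy sequence. It expects string BIO labels, so integer tags as stored in tner/tweetner7 would first need to be mapped back through an inverted label2id. The tokens and tags below are invented for illustration, and the snippet assumes decode_ner_tags and label2id from process/tweet_ner.py are in scope:

from pprint import pprint

# Invert the label2id mapping added in this commit so integer tags become BIO strings.
id2label = {v: k for k, v in label2id.items()}

tokens = ["Tim", "Cook", "visited", "Austin"]   # invented example tokens
tags = [5, 12, 14, 4]                           # B-person, I-person, O, B-location as integer ids
labels = [id2label[t] for t in tags]            # decode_ner_tags needs the string form

pprint(decode_ner_tags(labels, tokens))
# [{'entity': ['Tim', 'Cook'], 'position': [0, 1], 'type': 'person'},
#  {'entity': ['Austin'], 'position': [3], 'type': 'location'}]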