# super_tweeteval/process/tweet_ner.py
# Build the tweet_ner7 jsonl splits (train/validation/test) from the
# tner/tweetner7 dataset on the Hugging Face hub.
import os
import json
from typing import List
from pprint import pprint
from datasets import load_dataset
# BIO tag inventory for the 7 TweetNER entity types: 7 "B-" labels (ids 0-6),
# 7 "I-" labels (ids 7-13) and the outside tag "O" (id 14).
_entity_types = ["corporation", "creative_work", "event", "group", "location", "person", "product"]
_labels = ["B-{}".format(t) for t in _entity_types] + ["I-{}".format(t) for t in _entity_types] + ["O"]
label2id = {label: idx for idx, label in enumerate(_labels)}
id2label = {v: k for k, v in label2id.items()}
def decode_ner_tags(tag_sequence: List, input_sequence: List):
    """Decode a BIO tag-id sequence into entity spans.

    Each returned dict has keys 'type' (entity type string), 'entity'
    (list of tokens in the span) and 'position' (list of token indices).
    Stray 'I-' tags — ones with no open span, or whose type does not match
    the open span — close the current span and are themselves dropped.
    """
    assert len(tag_sequence) == len(input_sequence), str([len(tag_sequence), len(input_sequence)])
    spans = []
    cur_tokens = []
    cur_positions = []
    cur_type = None

    def _flush():
        # Emit the currently open span (if any) and reset the accumulators.
        nonlocal cur_tokens, cur_type
        if cur_tokens and cur_type is not None:
            spans.append({'type': cur_type, 'entity': cur_tokens, 'position': cur_positions})
        cur_tokens = []
        cur_type = None

    for index, (tag_id, token) in enumerate(zip(tag_sequence, input_sequence)):
        label = id2label[tag_id]
        if label.startswith('B-'):
            # A new span always starts here; close whatever was open.
            _flush()
            cur_type = label[2:]
            cur_tokens = [token]
            cur_positions = [index]
        elif label.startswith('I-'):
            if not cur_tokens or label[2:] != cur_type:
                # 'I-' without a matching open 'B-' span: close and skip it.
                _flush()
            else:
                cur_tokens.append(token)
                cur_positions.append(index)
        elif label == 'O':
            _flush()
        else:
            raise ValueError('unknown tag: {}'.format(label))
    _flush()
    return spans
# Output directory for the processed jsonl splits.
os.makedirs("data/tweet_ner7", exist_ok=True)
# Download/load the TweetNER7 dataset from the Hugging Face hub.
data = load_dataset("tner/tweetner7")
def process(tmp):
    """Convert a pandas split of tner/tweetner7 into json-serialisable records.

    Each record keeps the raw BIO label-id sequence and the tokens, and adds
    the whitespace-joined text plus the decoded entity list (each entity is
    reduced to its type and surface string; positions are discarded).
    """
    records = [row.to_dict() for _, row in tmp.iterrows()]
    for record in records:
        record.pop("id")
        # Decode BIO ids into spans, then collapse each span's token list
        # into a single surface string.
        entities = decode_ner_tags(record['tags'].tolist(), record['tokens'].tolist())
        for entity in entities:
            entity.pop("position")
            entity["entity"] = " ".join(entity["entity"])
        record['gold_label_sequence'] = record.pop('tags').tolist()
        record['text_tokenized'] = record.pop('tokens').tolist()
        record['text'] = ' '.join(record['text_tokenized'])
        record['entities'] = entities
    return records
# Process each dataset split and write one json record per line.
train = process(data["train_2020"].to_pandas())
val = process(data["validation_2020"].to_pandas())
test = process(data["test_2021"].to_pandas())
for split_name, records in [("train", train), ("validation", val), ("test", test)]:
    with open("data/tweet_ner7/{}.jsonl".format(split_name), "w") as f:
        f.write("\n".join([json.dumps(record) for record in records]))