import os
import json
from typing import List

from datasets import load_dataset

# IOB2 label scheme of TweetNER7: seven entity types plus the outside tag "O".
label2id = {
    "B-corporation": 0,
    "B-creative_work": 1,
    "B-event": 2,
    "B-group": 3,
    "B-location": 4,
    "B-person": 5,
    "B-product": 6,
    "I-corporation": 7,
    "I-creative_work": 8,
    "I-event": 9,
    "I-group": 10,
    "I-location": 11,
    "I-person": 12,
    "I-product": 13,
    "O": 14
}
id2label = {v: k for k, v in label2id.items()}


def decode_ner_tags(tag_sequence: List, input_sequence: List):
    """Decode an IOB2 tag sequence into entity spans.

    Returns a list of dicts with keys 'type', 'entity' (surface tokens) and
    'position' (token indices).
    """

    def update_collection(_tmp_entity, _tmp_entity_type, _tmp_pos, _out):
        # Flush the span currently being built (if any) into the output.
        if len(_tmp_entity) != 0 and _tmp_entity_type is not None:
            _out.append({'type': _tmp_entity_type, 'entity': _tmp_entity, 'position': _tmp_pos})
            _tmp_entity = []
            _tmp_entity_type = None
            _tmp_pos = []  # reset the positions together with the tokens
        return _tmp_entity, _tmp_entity_type, _tmp_pos, _out

    assert len(tag_sequence) == len(input_sequence), str([len(tag_sequence), len(input_sequence)])
    out = []
    tmp_entity = []
    tmp_pos = []
    tmp_entity_type = None
    for n, (_l, _i) in enumerate(zip(tag_sequence, input_sequence)):
        _l = id2label[_l]
        if _l.startswith('B-'):
            # A new entity begins: flush any open span, then start a new one.
            _, _, _, out = update_collection(tmp_entity, tmp_entity_type, tmp_pos, out)
            tmp_entity_type = '-'.join(_l.split('-')[1:])
            tmp_entity = [_i]
            tmp_pos = [n]
        elif _l.startswith('I-'):
            tmp_tmp_entity_type = '-'.join(_l.split('-')[1:])
            if len(tmp_entity) == 0:
                # I- tag without a preceding B-: treat it as invalid and skip it.
                tmp_entity, tmp_entity_type, tmp_pos, out = update_collection(tmp_entity, tmp_entity_type, tmp_pos, out)
            elif tmp_tmp_entity_type != tmp_entity_type:
                # Entity type changed mid-span: flush the open span, skip the token.
                tmp_entity, tmp_entity_type, tmp_pos, out = update_collection(tmp_entity, tmp_entity_type, tmp_pos, out)
            else:
                # Continuation of the open span.
                tmp_entity.append(_i)
                tmp_pos.append(n)
        elif _l == 'O':
            # Outside any entity: flush the open span, if any.
            tmp_entity, tmp_entity_type, tmp_pos, out = update_collection(tmp_entity, tmp_entity_type, tmp_pos, out)
        else:
            raise ValueError('unknown tag: {}'.format(_l))
    # Flush a span that runs to the end of the sequence.
    _, _, _, out = update_collection(tmp_entity, tmp_entity_type, tmp_pos, out)
    return out
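
# Illustrative check (not part of the original pipeline): ids 5, 12 and 14 are
# B-person, I-person and O in the label map above, so a two-token person span
# followed by "O" decodes into a single entity.
assert decode_ner_tags([5, 12, 14], ["John", "Doe", "runs"]) == [
    {'type': 'person', 'entity': ['John', 'Doe'], 'position': [0, 1]}]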

# TweetNER7 ships temporal splits (2020 and 2021) for train/validation/test.
data = load_dataset("tner/tweetner7")


def process(tmp):
    """Convert a TweetNER7 split (pandas DataFrame) into text/condition records."""
    tmp = [i.to_dict() for _, i in tmp.iterrows()]
    for i in tmp:
        # Verified-account mentions are marked as "{@handle@}"; restore them to
        # "@handle" (spaces inside the handle become underscores).
        tokens = ["@" + t.replace("{@", "").replace("@}", "").replace(" ", "_") if t.startswith("{@") else t
                  for t in i.pop('tokens')]
        # Render each decoded span as "<surface form> (<type>)".
        entities = []
        for e in decode_ner_tags(i.pop('tags').tolist(), tokens):
            entities.append(f'{" ".join(e["entity"])} ({e["type"]})')
        # Map the dataset's anonymization tokens back to readable forms.
        i['text'] = " ".join(tokens).replace("{{USERNAME}}", "@user").replace("{{URL}}", "{URL}")
        i['condition'] = f'Entities: {", ".join(entities)}'
    return tmp
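
# Illustrative check on a hypothetical tweet (not from the dataset): the
# verified-mention marker "{@Apple@}" is restored to "@Apple", "{{URL}}" is
# mapped to "{URL}", and the decoded entity fills the condition string.
# numpy/pandas are imported here only for this demo; Dataset.to_pandas()
# yields the same column structure.
import numpy as np
import pandas as pd
_demo = process(pd.DataFrame({"tokens": [["{@Apple@}", "unveils", "{{URL}}"]],
                              "tags": [np.array([0, 14, 14])]}))[0]
assert _demo["text"] == "@Apple unveils {URL}"
assert _demo["condition"] == "Entities: @Apple (corporation)"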

# Merge the 2020 and 2021 splits for train/validation; test on the 2021 set.
train = process(data["train_2020"].to_pandas())
train += process(data["train_2021"].to_pandas())
val = process(data["validation_2020"].to_pandas())
val += process(data["validation_2021"].to_pandas())
test = process(data["test_2021"].to_pandas())

# Write each split as JSONL: one JSON record per line.
os.makedirs("dataset/ner", exist_ok=True)
with open("dataset/ner/train.jsonl", "w") as f:
    f.write("\n".join([json.dumps(i) for i in train]))
with open("dataset/ner/validation.jsonl", "w") as f:
    f.write("\n".join([json.dumps(i) for i in val]))
with open("dataset/ner/test.jsonl", "w") as f:
    f.write("\n".join([json.dumps(i) for i in test]))
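
# Optional read-back sanity check (a minimal sketch, not part of the original
# script): each written line should parse back into a record that still
# carries the generated fields.
with open("dataset/ner/test.jsonl") as f:
    for line in f:
        record = json.loads(line)
        assert "text" in record and "condition" in record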