import os
from transformers import AutoTokenizer
import json

# Load the pretrained Chinese BERT tokenizer once at module level;
# its vocabulary ids are the "features" built by make_x_and_y below.
model_name = 'bert-base-chinese'
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Bidirectional mapping between NER tag names and integer ids.
# The tag set is BIO tags for each entity type, preceded by the three
# bookkeeping labels START (for [CLS]), END (for [SEP]) and O.
ner_types = ['bod', 'dep', 'dis', 'dru', 'equ', 'ite', 'mic', 'pro', 'sym']
ner_types_all = ['START', 'END', 'O']
for xtype in ner_types:
    ner_types_all += ['B-' + xtype, 'I-' + xtype]
id2type = dict(enumerate(ner_types_all))
type2id = {name: idx for idx, name in id2type.items()}
print(id2type)
print(type2id)


def make_x_and_y(xpath, xlimit=0):
    """Read a CMeEE-style JSON file and build parallel token-id / label-id lists.

    Each record must carry a ``text`` field plus an ``entities`` list whose
    items have ``type``, ``start_idx`` and ``end_idx``.  The text is
    tokenized segment by segment (plain text / entity / plain text ...) so
    the produced BIO labels stay aligned with the subword tokens even when
    the tokenizer splits or merges characters.

    Args:
        xpath: path of the UTF-8 JSON dataset file.
        xlimit: if non-zero, stop after this many records.

    Returns:
        (all_features, all_labels): two parallel lists; element i holds the
        BERT token ids / label ids of record i, wrapped in [CLS]/START and
        [SEP]/END.
    """
    print(f'Reading {xpath}')
    with open(xpath, 'r', encoding='utf8') as fi:
        xcont = fi.read()
        print(f'Length: {len(xcont):,d}')
        xdata = json.loads(xcont)

    def get_tokens_of_substr(xstr):
        # Tokenize xstr[xpos:xpos_end] and advance xpos to the span end.
        # Returns [] for an empty/inverted span (e.g. adjacent entities).
        nonlocal xpos, xpos_end
        if xpos >= xpos_end:
            return []
        xpart = xstr[xpos:xpos_end]
        xpos = xpos_end
        return tokenizer.encode(xpart, add_special_tokens=False)

    all_features = []
    all_labels = []
    for cnt, xobj in enumerate(xdata):
        if xlimit and cnt >= xlimit:
            break

        # Sequence starts with [CLS] / START.
        xfeatures = [tokenizer.cls_token_id]
        xlabels = [type2id['START']]

        xline = xobj['text']
        xpos = 0
        xpos_end = 0
        for xen_obj in xobj['entities']:
            # One NER annotation per iteration.
            xtype = xen_obj['type']
            xstart = xen_obj['start_idx']
            xend = xen_obj['end_idx']

            # Plain text before this entity gets the 'O' label.
            # BUGFIX: the span ends at xstart (exclusive), not xstart - 1,
            # which silently dropped one character per entity; and the
            # label list must be [id] * n (n copies), not [id * n] (one
            # element holding the id multiplied by n).
            xpos_end = xstart
            xo_tokens = get_tokens_of_substr(xline)
            xfeatures.extend(xo_tokens)
            xlabels.extend([type2id['O']] * len(xo_tokens))

            # Entity span: B- tag on the first token, I- on the rest.
            # NOTE(review): CMeEE's 'end_idx' is inclusive, so the slice
            # end is xend + 1 — confirm against the dataset spec.
            xpos = xstart
            xpos_end = xend + 1
            xner_tokens = get_tokens_of_substr(xline)
            xfeatures.extend(xner_tokens)
            if xner_tokens:  # guard: an empty span must not emit a stray B- tag
                xlabels.append(type2id['B-' + xtype])
                xlabels.extend([type2id['I-' + xtype]] * (len(xner_tokens) - 1))

        # Remaining plain text after the last entity.
        xpos_end = len(xline)
        xo_tokens = get_tokens_of_substr(xline)
        xfeatures.extend(xo_tokens)
        xlabels.extend([type2id['O']] * len(xo_tokens))

        # Sequence ends with [SEP] / END.
        xfeatures.append(tokenizer.sep_token_id)
        xlabels.append(type2id['END'])

        all_features.append(xfeatures)
        all_labels.append(xlabels)
    return all_features, all_labels


if __name__ == '__main__':

    def _main():
        # Smoke run: convert the first 4 records of the CMeEE-V2 dev split
        # and dump the raw ids plus the decoded token strings.
        dev_path = r'D:\_dell7590_root\local\LNP_datasets\med\CBLUE\CMeEE-V2\CMeEE-V2_dev.json'
        all_features, all_labels = make_x_and_y(dev_path, 4)
        print(all_features)
        print(all_labels)

        for idx, (xids, yids) in enumerate(zip(all_features, all_labels)):
            print(idx)
            print(tokenizer.convert_ids_to_tokens(xids))
            print(yids)

    _main()
