#! -*- coding:utf-8 -*-
# bert+crf用来做实体识别
# 数据集：http://s3.bmio.net/kashgari/china-people-daily-ner-corpus.tar.gz
# [valid_f1]  token_level: 97.06； entity_level: 95.90
# bert4torch==0.5.4
# 批量推理

import traceback
import os
import time
import torch
from tqdm import tqdm
from typing import List, Tuple, Dict, Optional
import numpy as np
from torch.utils.data import DataLoader
import torch.nn as nn
from bert4torch.snippets import sequence_padding, ListDataset, seed_everything
from bert4torch.layers import CRF
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel

maxlen = 64         # max token length fed to the encoder
batch_size = 200    # inference batch size
labels = ["SHORTNAME"]

# BIO tag set: 'O' plus B-/I- variants of every entity label.
categories = ['O']
for label in labels:
    categories.extend(("B-" + label, "I-" + label))
categories_id2label = dict(enumerate(categories))
categories_label2id = {tag: idx for idx, tag in enumerate(categories)}
# NOTE: the example below is stale — it shows a 13-tag schema (BRAND/MODEL/...),
# while this script builds only O/B-SHORTNAME/I-SHORTNAME from `labels` above.
"""
categories_id2label = {0: 'O',
 1: 'B-BRAND',
 2: 'I-BRAND',
 3: 'B-MODEL',
 4: 'I-MODEL',
 5: 'B-NAME',
 6: 'I-NAME',
 7: 'B-COLOR',
 8: 'I-COLOR',
 9: 'B-SPECS',
 10: 'I-SPECS',
 11: 'B-UNIT',
 12: 'I-UNIT'}

categories_label2id = {'O': 0,
 'B-BRAND': 1,
 'I-BRAND': 2,
 'B-MODEL': 3,
 'I-MODEL': 4,
 'B-NAME': 5,
 'I-NAME': 6,
 'B-COLOR': 7,
 'I-COLOR': 8,
 'B-SPECS': 9,
 'I-SPECS': 10,
 'B-UNIT': 11,
 'I-UNIT': 12}


"""

# Select GPU when available; collate_fn creates its tensors directly on this device.
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'

# Local pretrained-model directory (HuggingFace-style layout: vocab/config/weights).
pretrained_model = r"G:/nlp_about/pretrained_models/hfl-chinese-roberta-wwm-ext"
# Build the tokenizer
dict_path = os.path.join(pretrained_model, "vocab.txt")
config_path = os.path.join(pretrained_model, "config.json")
# NOTE(review): checkpoint_path is defined but never passed to build_transformer_model
# below — encoder weights come only from model.load_weights() in main(); confirm intended.
checkpoint_path = os.path.join(pretrained_model, "pytorch_model.bin")

tokenizer = Tokenizer(dict_path, do_lower_case=True)


# 定义bert上的模型结构
class Model(BaseModel):
    """BERT encoder + linear emission layer + CRF for token-level NER."""

    def __init__(self):
        super().__init__()
        # Encoder built without segment embeddings (segment_vocab_size=0).
        self.bert = build_transformer_model(config_path=config_path, segment_vocab_size=0)
        self.fc = nn.Linear(768, len(categories))  # includes B-/I- head and tail tags
        self.crf = CRF(len(categories))
        self.eval()  # inference-only script: start in eval mode

    def forward(self, token_ids):
        hidden = self.bert([token_ids])   # [btz, seq_len, hdsz]
        emissions = self.fc(hidden)       # [btz, seq_len, tag_size]
        mask = token_ids.gt(0).long()     # 1 on non-pad positions
        return emissions, mask

    def predict(self, token_ids):
        """Decode the best tag path per sample; returns [btz, seq_len] tag ids."""
        with torch.no_grad():
            emissions, mask = self.forward(token_ids)
            return self.crf.decode(emissions, mask)

    def predict_batch(self, token_list):
        """Run predict() independently on each element of token_list."""
        return [self.predict(chunk) for chunk in token_list]


# Global model instance on the selected device; fine-tuned weights are loaded later in main().
model = Model().to(device)


def trans_entity2tuple(scores):
    '''Convert decoded tag-id sequences into (sample_id, start, end, entity_type)
    tuples for metric computation / downstream filling.

    :param scores: iterable of per-sample tag-id tensors (CRF decode output);
        each element supports ``.item()``
    :return: list of ``(sample_id, start_token, end_token, entity_type)`` tuples,
        ``end_token`` inclusive
    '''
    batch_entity_ids = []
    for sample_id, path in enumerate(scores):
        entities = []       # finished/ongoing [sample_id, start, end, label] records
        open_entity = None  # entity still eligible for I- extension, else None
        for pos, tag_id in enumerate(path):
            tag = categories_id2label[tag_id.item()]
            if tag.startswith('B-'):
                # B- always opens a fresh entity at this position.
                open_entity = [sample_id, pos, pos, tag[2:]]
                entities.append(open_entity)
            elif open_entity is not None and tag.startswith('I-') and tag[2:] == open_entity[3]:
                # Matching I- extends the open entity's (inclusive) end.
                open_entity[2] = pos
            else:
                # 'O', padding, or a mismatched tag closes any open entity.
                open_entity = None
        batch_entity_ids.extend(tuple(entity) for entity in entities)
    return batch_entity_ids


def fill_predict_result(predict_result: list, temp_token_list: list, temp_label_list: list):
    """Append one decoded {label, text} entry to predict_result and return it.

    All ids in temp_label_list must map (after stripping B-/I-) to one single
    entity type, otherwise the assertion fires.
    """
    stripped = {categories[label_id].replace("B-", "").replace("I-", "")
                for label_id in temp_label_list}
    assert len(stripped) == 1, f"get more than 1 label in predict result:{stripped}"

    entry = {"label": stripped.pop(), "text": tokenizer.decode(temp_token_list)}
    predict_result.append(entry)
    return predict_result


def get_chinese_label(en_label: str) -> Optional[str]:
    """Map an English entity tag to its Chinese display name; None if unmapped."""
    return {"BRAND": "品牌"}.get(en_label)


def fill_outputs_batch(text_list: List[str], entity_lists: List[Tuple], tokens: List):
    """Build one {label, text, done} result dict per input text from extracted entities.

    :param text_list: the (preprocessed) input texts, one per sample
    :param entity_lists: tuples from trans_entity2tuple, e.g.
        [(0, 1, 2, 'BRAND'), (1, 1, 2, 'BRAND')] — (sample_index, start, end, label),
        end inclusive
    :param tokens: per-sample token-id sequences aligned with entity offsets
    :return: list parallel to text_list; entries without a usable entity keep
        {"label": None, "text": None, "done": False}
    """
    # One independent dict per text — a `[{...}] * n` literal would alias a single
    # shared dict, so marking one sample "done" would mark them all.
    results = [{"label": None, "text": None, "done": False} for _ in text_list]

    # Route each entity to its sample via the sample index carried in the tuple;
    # zipping entity_lists against text_list positionally would misalign whenever a
    # text yields zero or several entities.
    for entity in entity_lists:
        try:
            text_index, start, end_inclusive, label = entity
            token_ids = tokens[text_index]
            token_text = tokenizer.decode(token_ids[start:end_inclusive + 1])
            # Only the first non-empty entity per sample wins.
            if token_text and not results[text_index]["done"]:
                results[text_index] = {
                    "label": get_chinese_label(label),
                    "text": token_text,
                    "done": True,
                }
        except Exception:
            # Best-effort: a malformed entity must not abort the whole batch.
            print(traceback.format_exc())
            continue

    return results


# Dataset construction
class MyDataset(ListDataset):
    """Wraps raw strings as [text, [start, end, "SHORTNAME"], ...] samples.

    Each input is split on '\n'; the segments are concatenated back into the
    sample text and every segment contributes one [i, i, "SHORTNAME"] span
    (placeholder spans — labels are not used for anything at inference time).
    """

    @staticmethod
    def load_data(texts: List[str]):
        samples = []
        for raw_text in texts:
            sample = ['']
            for seg_idx, segment in enumerate(raw_text.split('\n')):
                sample[0] += segment
                sample.append([seg_idx, seg_idx, "SHORTNAME"])
            samples.append(sample)
        return samples


def collate_fn(batch):
    """Tokenize a batch of [text, span...] samples into padded tensors.

    Returns (token_ids, label_ids), both [btz, max_seq_len] long tensors on
    `device`; spans that survive char→token remapping get B-/I- label ids.
    """
    token_id_seqs, label_id_seqs = [], []
    for sample in batch:
        text = sample[0]
        tokens = tokenizer.tokenize(text, maxlen=maxlen)
        mapping = tokenizer.rematch(text, tokens)
        # char offset -> token index, for span starts and ends respectively
        char2tok_start = {span[0]: tok_idx for tok_idx, span in enumerate(mapping) if span}
        char2tok_end = {span[-1]: tok_idx for tok_idx, span in enumerate(mapping) if span}
        token_ids = tokenizer.tokens_to_ids(tokens)
        label_ids = np.zeros(len(token_ids))
        for char_start, char_end, tag in sample[1:]:
            # Keep only spans whose both ends map onto surviving tokens.
            if char_start in char2tok_start and char_end in char2tok_end:
                tok_start = char2tok_start[char_start]
                tok_end = char2tok_end[char_end]
                label_ids[tok_start] = categories_label2id['B-' + tag]
                label_ids[tok_start + 1:tok_end + 1] = categories_label2id['I-' + tag]
        token_id_seqs.append(token_ids)
        label_id_seqs.append(label_ids)
    padded_tokens = torch.tensor(sequence_padding(token_id_seqs), dtype=torch.long, device=device)
    padded_labels = torch.tensor(sequence_padding(label_id_seqs), dtype=torch.long, device=device)
    return padded_tokens, padded_labels


# DataLoader worker count; 0 loads in the main process.
# NOTE(review): collate_fn builds tensors directly on `device` (possibly CUDA);
# confirm before raising this above 0.
num_workers = 0


def inference_ner_batch(text_list1: List[str]):
    """Run batched NER inference over raw texts.

    :param text_list1: raw input strings
    :return: list parallel to the input; each entry is a
        {"label", "text", "done"} dict from fill_outputs_batch
    """
    # Replace spaces with '_' so the tokenizer keeps character alignment.
    text_list = [text.replace(" ", "_") for text in text_list1]
    data_loader = DataLoader(MyDataset(text_list), batch_size=batch_size, shuffle=False,
                             collate_fn=collate_fn, num_workers=num_workers)
    predict_output = []
    batch_token_list = []
    # Labels from collate_fn are placeholders at inference time; only token ids matter.
    for batch_token_ids, _batch_labels in data_loader:
        batch_token_list.extend(batch_token_ids)
        predict_output.extend(model.predict(batch_token_ids))

    entity_tuples = trans_entity2tuple(predict_output)
    return fill_outputs_batch(text_list, entity_tuples, batch_token_list)


def main():
    """Demo entry point: load fine-tuned weights and time one inference batch."""
    # NOTE(review): this entire list is dead data — it is overwritten by the
    # single-element reassignment below before any inference runs.
    text_list = ['KVK 导向环，0805105 ，WR 75*70*12 合成树脂 售卖规格：1件',
                 '三德科技 控温仪306313，3022520 型号：SDDH(a）-FCD-3018 售卖规格：1个',
                 '静牌 基本型缠绕垫，φ370×285×5mm，304+石墨，JPZB000201 售卖规格：1件',
                 '杰牌传动JIE JRT齿轮减速电机，规格57，速比6.57，JRTKAF57DS112M4/6.57/A/M1/0°/4.0kW',
                 '桥防 紫铜排，T8ZTPP-6*30*3000 6*30*3000mm 售卖规格：1根',
                 '开尔照明 LED灯泡，护眼柱形灯，5W，白光，E27 T40，42×86，420lm 售卖规格：1个',
                 '费斯托 压力传感器，SDE3-D10D-B-HQ4-2P-M8，540209 SDE3系列 售卖规格：1个',
                 '起帆 聚氯乙烯绝缘和护套软电线，RVV-13*1.5 黑色 实物100米/卷（100米的整数倍下单），不含卸货 售卖规格：1米',
                 'CATO 2-甲基二苯甲酮，CCHM701634 CAS:131-58-8，>95%，250mg 售卖规格：250毫克/支',
                 '阳语/Sunlit 智能差压变送器（单法兰），YPDFN600Kpa/衬PTFE/DN50/100mm 1m/0.0025/100℃/HART 售卖规格：1台',
                 '世达 10件套高速钢全磨制黑金麻花钻2.8MM，52628 售卖规格：10个/套', '镁沃/MWO 全螺纹螺柱Ⅱ型六角螺母三组合，HG/T20634，M33-3.5*125，35CrMo 售卖规格：50套/箱', '镁沃/MWO 全螺纹螺柱带加厚螺母平垫弹垫，HG/T20634，M27-3.0*180，8.8级发黑 售卖规格：20套/箱',
                 '长安 细杆等长双头螺栓，GB901，M24*145，304，CA-BOTH-088 售卖规格：30个/箱', '东明/TONG DIN931半牙外六角螺栓带加厚螺母，M6-1.0*90，不锈钢316/A4 售卖规格：100套/包', '典点扣/DDK GB5783全牙发黑8.8级外六角螺栓带加厚螺母平垫弹垫，M24-3.0*70 售卖规格：35套/箱', '海尔 13L水气双调燃气热水器，JSQ25-13J(12T)，一价全包',
                 '麦克林/macklin 氨基乙腈盐酸盐，A823994-100g CAS：6011-14-9，98%，100g/瓶 售卖规格：1瓶', '德力西/DELIXI 塑壳断路器，DZ20125012503340 DZ20-1250/3340 1250A AC380V 售卖规格：1个', '阳语/Sunlit 插头式热电阻，YTRTNC/单4线/4/150 1m/1m/-200-600℃/KTG0.5/非防爆 售卖规格：1台',
                 '阳语/Sunlit 插头式热电阻，YTRTND/双支3线/3mm/100mm 2m/1m/-200-600℃/卡套G0.5/非防爆 售卖规格：1台', '远东 阻燃B类聚氯乙烯绝缘和护套铝塑复合带屏蔽钢带铠装控制软电缆，ZB-KVVRP3-22-450/750V-12*2.5', '科威弘达 铠装热电阻，WZPK2-391 L=15000*1000 M16*1.5',
                 '德力西/DELIXI 塑壳断路器，DZ20400TY2503WTP DZ20Y-400T/3300 250A 透明 无接线铜排 售卖规格：1个', '崮山 高压胶管总成，DN50*20m-38MPa，根', '博密 机械密封，H74N/63-G9-Q2BM5GF 碳化硅对石墨，FEP包覆氟橡胶+316弹簧 售卖规格：1个',
                 '阳语/Sunlit 热电偶，YTCSFNTMT182/K/4mm/300mm Inconel600/G0.5/带线盒无数显非防爆 售卖规格：1个', '哈德威 氯丁橡胶CR70 O型圈，170×4（内径*线径），单个价格，50的倍数起订', '杰牌传动JIE JRT齿轮减速电机，规格67，速比2.4，JRTRXF67DS112M4/2.4/BE/M1/0°/4.0kW',
                 'Raxwell 防爆新型木柄八角锤（重型），铝青铜，RTAH0133 2700g 售卖规格：1把']

    # text_list = ['KVK 导向环，0805105 ，WR 75*70*12 合成树脂 售卖规格：1件', '三德科技 控温仪306313，3022520 型号：SDDH(a）-FCD-3018 售卖规格：1个']
    # Active input: a single sample; the list above is shadowed here.
    text_list = ['东明/TONG DIN931半牙外六角螺栓带加厚螺母，M6-1.0*90，不锈钢316/A4 售卖规格：100套/包']
    # Load the fine-tuned NER weights (encoder was built without a checkpoint).
    model.load_weights("./model/short_name_epoch_2_steps_5621_f2_0.83622.pt")

    model.eval()
    time.sleep(1)
    start = time.time()
    result = inference_ner_batch(text_list)
    end = time.time()
    print("use time", end - start)
    # 0.12297940254211426
    print(result)
    for text, one in zip(text_list, result):
        print(text, one)


def inference2():
    """Alternative single-batch inference path (currently not called from __main__)."""
    text_list = ['KVK 导向环，0805105 ，WR 75*70*12 合成树脂 售卖规格：1件', '三德科技 控温仪306313，3022520 型号：SDDH(a）-FCD-3018 售卖规格：1个']
    input_ids = []
    for text in text_list:
        # encode() returns (token_ids, segment_ids); keep only the token ids.
        input_id = tokenizer.encode(text, maxlen=maxlen)[0]
        input_ids.append(input_id)
    input_ids = sequence_padding(input_ids)
    # NOTE(review): tensor is created on CPU while `model` was moved to `device`
    # (possibly cuda:0) — confirm this path runs on a matching device.
    scores = model.predict(torch.tensor(input_ids))
    print(scores)

    results = []
    for score in scores:
        res = {}
        # NOTE(review): model.predict returns CRF-decoded tag-id paths, not a 3-D
        # score tensor; unpacking np.where(score > 0.5) into (l, start, end) looks
        # like leftover code from a span-scoring model — verify before relying on it.
        for l, start, end in zip(*np.where(score > 0.5)):
            # NOTE(review): always indexes input_ids[0]; presumably should index
            # the current sample — confirm.
            res[tokenizer.decode(input_ids[0][start:end + 1])] = categories_id2label[l]
        results.append(res)
    print(results)


# Script entry point: run the batched-inference demo (inference2 kept for reference).
if __name__ == '__main__':
    main()
    # inference2()
