#! -*- coding:utf-8 -*-
# bert+crf用来做实体识别
# 数据集：http://s3.bmio.net/kashgari/china-people-daily-ner-corpus.tar.gz
# [valid_f1]  token_level: 97.06； entity_level: 95.90
# bert4torch==0.5.4
# 波佬改过的

import os
import time
import torch

from typing import List, Tuple, Dict, Optional
import numpy as np
from torch.utils.data import DataLoader
import torch.nn as nn
from bert4torch.snippets import sequence_padding, ListDataset, seed_everything
from bert4torch.layers import CRF
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel

# ---- Inference configuration ----
maxlen = 64           # max sequence length passed to the tokenizer
batch_size = 1
labels = ["SHORTNAME"]  # entity types this model recognizes

# Build the BIO tag set: 'O' plus B-/I- tags for every entity label.
categories = ['O']
for label in labels:
    categories.append("B-" + label)
    categories.append("I-" + label)
categories_id2label = {i: k for i, k in enumerate(categories)}
categories_label2id = {k: i for i, k in enumerate(categories)}

device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
# https://hf-mirror.com/hfl/chinese-roberta-wwm-ext
# pretrained_model = "/data/pretrain_ckpt/roberta/hfl@chinese-roberta-wwm-ext-base"
pretrained_model = r"G:/nlp_about/pretrained_models/hfl-chinese-roberta-wwm-ext"
# File layout of the pretrained checkpoint directory
dict_path = os.path.join(pretrained_model, "vocab.txt")
config_path = os.path.join(pretrained_model, "config.json")
checkpoint_path = os.path.join(pretrained_model, "pytorch_model.bin")

# Build the tokenizer from the pretrained vocab (input is lower-cased)
tokenizer = Tokenizer(dict_path, do_lower_case=True)


# 定义bert上的模型结构
# Model on top of BERT: encoder -> linear emission layer -> CRF decoding
class Model(BaseModel):
    def __init__(self):
        super().__init__()
        # BERT encoder without segment (token-type) embeddings
        self.bert = build_transformer_model(config_path=config_path, segment_vocab_size=0)
        # Project hidden states into tag space (tag count includes 'O')
        self.fc = nn.Linear(768, len(categories))
        self.crf = CRF(len(categories))
        self.eval()

    def forward(self, token_ids):
        """Return (emission scores [btz, seq_len, tag_size], attention mask)."""
        hidden = self.bert([token_ids])   # [btz, seq_len, hdsz]
        emissions = self.fc(hidden)       # [btz, seq_len, tag_size]
        mask = token_ids.gt(0).long()     # 1 on real tokens, 0 on padding
        return emissions, mask

    def predict(self, token_ids):
        """Viterbi-decode the best tag path for a batch of token ids."""
        with torch.no_grad():
            emissions, mask = self.forward(token_ids)
            best_path = self.crf.decode(emissions, mask)  # [btz, seq_len]
        return best_path

    def predict_batch(self, token_list):
        """Decode every batch in `token_list`, one batch at a time."""
        return [self.predict(batch) for batch in token_list]


# Instantiate the model and move it to the selected device (weights are
# loaded later via model.load_weights in __main__)
model = Model().to(device)


def trans_entity2tuple(scores):
    """Convert decoded tag-id tensors to (sample_id, start, end, label) tuples.

    :param scores: [btz, seq_len] tensor of tag ids (output of CRF decode)
    :return: list of tuples (sample index, start token, end token, entity label)
    """
    batch_entity_ids = []
    for sample_idx, one_samp in enumerate(scores):
        # Collected spans: [sample_idx, start, end, label]; an empty list []
        # is appended as a sentinel to mark that the previous span is closed.
        entity_ids = []
        for tok_idx, item in enumerate(one_samp):
            flag_tag = categories_id2label[item.item()]
            if flag_tag.startswith('B-'):  # a new entity begins here
                entity_ids.append([sample_idx, tok_idx, tok_idx, flag_tag[2:]])
            elif len(entity_ids) == 0:
                continue  # no open span yet and not a B- tag
            elif entity_ids[-1] and flag_tag.startswith('I-') and flag_tag[2:] == entity_ids[-1][-1]:
                entity_ids[-1][-2] = tok_idx  # extend the open span's end
            elif entity_ids[-1]:
                entity_ids.append([])  # 'O' or mismatched tag closes the span

        # Emit non-empty spans. The loop variable is NOT named `i` anymore:
        # the old code reused `i` here, shadowing the enumerate sample index.
        for entity in entity_ids:
            if entity:
                batch_entity_ids.append(tuple(entity))
    return batch_entity_ids


def fill_predict_result(predict_result: list, temp_token_list: list, temp_label_list: list):
    """Append one decoded {"label", "text"} entry to `predict_result` and return it.

    All tag ids in `temp_label_list` must map to a single entity label after
    stripping the B-/I- prefixes, otherwise an AssertionError is raised.
    """
    tag_names = [categories[idx] for idx in temp_label_list]
    stripped = [tag.replace("B-", "").replace("I-", "") for tag in tag_names]
    assert len(set(stripped)) == 1, f"get more than 1 label in predict result:{set(stripped)}"

    entry = {"label": None, "text": None}
    entry['label'] = stripped[0]
    entry["text"] = tokenizer.decode(temp_token_list)
    predict_result.append(entry)
    return predict_result


# English -> Chinese display-name mapping for entity labels. Module-level so
# the dict is built once instead of on every call.
_EN2CN_LABEL_MAP = {"SHORTNAME": "型号"}


def get_chinese_label(en_label: str) -> Optional[str]:
    """Map an English entity label to its Chinese display name.

    :param en_label: English label, e.g. "SHORTNAME"
    :return: the Chinese name, or None when the label is unknown
    """
    return _EN2CN_LABEL_MAP.get(en_label)


def fill_outputs_batch(tokens: torch.Tensor, entity_lists: List[Tuple]):
    """Build one {"label", "text"} dict per sample from extracted entity spans.

    :param tokens: [btz, seq_len] token-id tensor (the model input batch)
    :param entity_lists: tuples like (sample_idx, start, end, 'SHORTNAME')
    :return: list of dicts aligned with the batch; samples without an entity
             keep {"label": None, "text": None}
    """
    # One independent dict per sample. The previous `[{...}] * len(tokens)`
    # put N references to a SINGLE shared dict in the list, so mutating one
    # placeholder would have mutated them all.
    results = [{"label": None, "text": None} for _ in range(len(tokens))]

    for sample_idx, start, end_inclusive, label in entity_lists:
        entry = {"label": None, "text": None}
        end = end_inclusive + 1  # spans are inclusive; slicing is exclusive
        token_id = tokens[sample_idx]
        token_text = tokenizer.decode(token_id[start: end])
        if token_text:
            entry["label"] = get_chinese_label(label)
            # '_' was used as a space placeholder during encoding; restore it
            entry["text"] = token_text.replace("_", " ")
        # Later entities for the same sample overwrite earlier ones,
        # matching the original behavior.
        results[sample_idx] = entry
    return results


# 加载数据集
# Dataset wrapper: turns raw text strings into [text, [start, end, label], ...] samples
class MyDataset(ListDataset):

    @staticmethod
    def load_data(texts: List[str]):
        """Build samples of the form [full_text, [i, i, "SHORTNAME"], ...].

        NOTE(review): splitting the text on newlines yields whole lines, not
        characters, so for newline-free inputs each sample gets a single
        dummy span [0, 0, "SHORTNAME"]. This looks like it was meant to
        iterate characters (enumerate over the string itself); it is harmless
        here because labels are ignored at inference time — confirm before
        reusing this loader for training.
        """
        D = []
        for _text in texts:
            d = ['']
            for i, char in enumerate(_text.split('\n')):
                d[0] += char
                d.append([i, i, "SHORTNAME"])
            D.append(d)
        return D


def collate_fn(batch):
    """Tokenize each sample and return padded (token_ids, tag_ids) tensors."""
    all_token_ids, all_tag_ids = [], []
    for sample in batch:
        text, spans = sample[0], sample[1:]
        tokens = tokenizer.tokenize(text, maxlen=maxlen)
        # Map character offsets back to token indices
        mapping = tokenizer.rematch(text, tokens)
        char2tok_start = {m[0]: tok_i for tok_i, m in enumerate(mapping) if m}
        char2tok_end = {m[-1]: tok_i for tok_i, m in enumerate(mapping) if m}
        token_ids = tokenizer.tokens_to_ids(tokens)
        tag_ids = np.zeros(len(token_ids))
        for char_start, char_end, label in spans:
            # Skip spans that were truncated away or fall between tokens
            if char_start not in char2tok_start or char_end not in char2tok_end:
                continue
            tok_start = char2tok_start[char_start]
            tok_end = char2tok_end[char_end]
            tag_ids[tok_start] = categories_label2id['B-' + label]
            tag_ids[tok_start + 1:tok_end + 1] = categories_label2id['I-' + label]
        all_token_ids.append(token_ids)
        all_tag_ids.append(tag_ids)
    padded_tokens = torch.tensor(sequence_padding(all_token_ids), dtype=torch.long, device=device)
    padded_tags = torch.tensor(sequence_padding(all_tag_ids), dtype=torch.long, device=device)
    return padded_tokens, padded_tags


# DataLoader worker processes (0 = load data in the main process)
num_workers = 0


def inference_ner_batch(text_list1: List[str], btz=10):
    """Run batched NER inference over a list of texts.

    :param text_list1: raw input strings
    :param btz: DataLoader batch size
    :return: one {"label": ..., "text": ...} dict per input text, in order
    """
    # Spaces are replaced by '_' so the tokenizer keeps them as visible
    # tokens; fill_outputs_batch reverses this substitution when decoding.
    encoded_texts = [text.replace(" ", "_") for text in text_list1]

    data_loader = DataLoader(MyDataset(encoded_texts), batch_size=btz, shuffle=False,
                             collate_fn=collate_fn, num_workers=num_workers)
    entity_list = []
    # Labels produced by collate_fn are dummies at inference time — ignored.
    for batch_token_ids, _batch_labels in data_loader:
        predict_output = model.predict(batch_token_ids)
        batch_pred = trans_entity2tuple(predict_output)
        batch_pred = fill_outputs_batch(batch_token_ids, batch_pred)
        entity_list.extend(batch_pred)

    return entity_list


if __name__ == '__main__':
    # Demo inputs: Chinese product-description strings; the model extracts
    # the product model number ("SHORTNAME") from each one.
    text_list = ['KVK 导向环，0805105 ，WR 75*70*12 合成树脂 售卖规格：1件',
                 '三德科技 控温仪306313，3022520 型号：SDDH(a）-FCD-3018 售卖规格：1个',
                 '静牌 基本型缠绕垫，φ370×285×5mm，304+石墨，JPZB000201 售卖规格：1件',
                 '杰牌传动JIE JRT齿轮减速电机，规格57，速比6.57，JRTKAF57DS112M4/6.57/A/M1/0°/4.0kW',
                 '桥防 紫铜排，T8ZTPP-6*30*3000 6*30*3000mm 售卖规格：1根',
                 '开尔照明 LED灯泡，护眼柱形灯，5W，白光，E27 T40，42×86，420lm 售卖规格：1个',
                 '费斯托 压力传感器，SDE3-D10D-B-HQ4-2P-M8，540209 SDE3系列 售卖规格：1个',
                 '起帆 聚氯乙烯绝缘和护套软电线，RVV-13*1.5 黑色 实物100米/卷（100米的整数倍下单），不含卸货 售卖规格：1米',
                 'CATO 2-甲基二苯甲酮，CCHM701634 CAS:131-58-8，>95%，250mg 售卖规格：250毫克/支',
                 '阳语/Sunlit 智能差压变送器（单法兰），YPDFN600Kpa/衬PTFE/DN50/100mm 1m/0.0025/100℃/HART 售卖规格：1台',
                 '世达 10件套高速钢全磨制黑金麻花钻2.8MM，52628 售卖规格：10个/套', '镁沃/MWO 全螺纹螺柱Ⅱ型六角螺母三组合，HG/T20634，M33-3.5*125，35CrMo 售卖规格：50套/箱', '镁沃/MWO 全螺纹螺柱带加厚螺母平垫弹垫，HG/T20634，M27-3.0*180，8.8级发黑 售卖规格：20套/箱',
                 '长安 细杆等长双头螺栓，GB901，M24*145，304，CA-BOTH-088 售卖规格：30个/箱', '东明/TONG DIN931半牙外六角螺栓带加厚螺母，M6-1.0*90，不锈钢316/A4 售卖规格：100套/包', '典点扣/DDK GB5783全牙发黑8.8级外六角螺栓带加厚螺母平垫弹垫，M24-3.0*70 售卖规格：35套/箱', '海尔 13L水气双调燃气热水器，JSQ25-13J(12T)，一价全包',
                 '麦克林/macklin 氨基乙腈盐酸盐，A823994-100g CAS：6011-14-9，98%，100g/瓶 售卖规格：1瓶', '德力西/DELIXI 塑壳断路器，DZ20125012503340 DZ20-1250/3340 1250A AC380V 售卖规格：1个', '阳语/Sunlit 插头式热电阻，YTRTNC/单4线/4/150 1m/1m/-200-600℃/KTG0.5/非防爆 售卖规格：1台',
                 '阳语/Sunlit 插头式热电阻，YTRTND/双支3线/3mm/100mm 2m/1m/-200-600℃/卡套G0.5/非防爆 售卖规格：1台', '远东 阻燃B类聚氯乙烯绝缘和护套铝塑复合带屏蔽钢带铠装控制软电缆，ZB-KVVRP3-22-450/750V-12*2.5', '科威弘达 铠装热电阻，WZPK2-391 L=15000*1000 M16*1.5',
                 '德力西/DELIXI 塑壳断路器，DZ20400TY2503WTP DZ20Y-400T/3300 250A 透明 无接线铜排 售卖规格：1个', '崮山 高压胶管总成，DN50*20m-38MPa，根', '博密 机械密封，H74N/63-G9-Q2BM5GF 碳化硅对石墨，FEP包覆氟橡胶+316弹簧 售卖规格：1个',
                 '阳语/Sunlit 热电偶，YTCSFNTMT182/K/4mm/300mm Inconel600/G0.5/带线盒无数显非防爆 售卖规格：1个', '哈德威 氯丁橡胶CR70 O型圈，170×4（内径*线径），单个价格，50的倍数起订', '杰牌传动JIE JRT齿轮减速电机，规格67，速比2.4，JRTRXF67DS112M4/2.4/BE/M1/0°/4.0kW',
                 'Raxwell 防爆新型木柄八角锤（重型），铝青铜，RTAH0133 2700g 售卖规格：1把']
    # text_list = [
    #     "东明/TONG DIN931半牙外六角螺栓带加厚螺母，M6-1.0*90，不锈钢316/A4 售卖规格：100套/包",
    #     # "南孚(NANFU)9V碱性电池1粒装 9v 适用于遥控玩具/烟雾报警器/无线麦克风/万用表/话筒/遥控器等 6LR61",
    #     # "晨光(M&G)文具K35/0.5mm黑色中性笔 按动笔 经典子弹头签字笔 办公用水笔 12支/盒",
    # ]

    # with open("./sku_name.txt", "r", encoding="utf-8") as f:
    #     text_list = f.readlines()

    text_list = [x.strip() for x in text_list if x.strip()]
    # model.load_weights("/home/lb/projects/bert4torch/test_local/batch_ner.py/short_name_epoch_2_steps_5621_f2_0.83622.pt")
    model.load_weights("./model/short_name_epoch_2_steps_5621_f2_0.83622.pt")
    model.eval()
    time.sleep(1)
    # single-sample inference (not implemented here)

    # batch inference with timing
    ts_start = time.time()
    result = inference_ner_batch(text_list, btz=4)
    ts_end = time.time()
    print("use time", ts_end - ts_start)
    # 0.12297940254211426
    for text, one in zip(text_list, result):
        print(text, one)