#! -*- coding:utf-8 -*-
# bert+crf用来做实体识别
# 数据集：http://s3.bmio.net/kashgari/china-people-daily-ner-corpus.tar.gz
# [valid_f1]  token_level: 97.06； entity_level: 95.90
# bert4torch==0.5.4


import os
import time
import torch

from typing import List, Tuple, Dict, Optional
import numpy as np
from torch.utils.data import DataLoader
import torch.nn as nn
from bert4torch.snippets import sequence_padding, ListDataset, seed_everything
from bert4torch.layers import CRF
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel

# Inference hyper-parameters.
maxlen = 64        # max token length per text (longer inputs are truncated)
batch_size = 200
# Entity types to extract; expanded into BIO tags below.
labels = ["BRANDMODEL"]

# Build the BIO tag set: 'O' plus B-/I- for every entity type.
categories = ['O']
for label in labels:
    categories.append("B-" + label)
    categories.append("I-" + label)
categories_id2label = {i: k for i, k in enumerate(categories)}
categories_label2id = {k: i for i, k in enumerate(categories)}

device = 'cuda:0' if torch.cuda.is_available() else 'cpu'

# Local path to the pretrained chinese-roberta-wwm-ext checkpoint directory.
pretrained_model = r"G:/nlp_about/pretrained_models/hfl-chinese-roberta-wwm-ext"
# Build the tokenizer
dict_path = os.path.join(pretrained_model, "vocab.txt")
config_path = os.path.join(pretrained_model, "config.json")
checkpoint_path = os.path.join(pretrained_model, "pytorch_model.bin")

tokenizer = Tokenizer(dict_path, do_lower_case=True)


# Model structure on top of BERT: encoder -> linear emission head -> CRF.
class Model(BaseModel):
    """BERT backbone with a linear emission layer and a CRF decoding head."""

    def __init__(self):
        super().__init__()
        # Single-sentence input: no segment embeddings needed.
        self.bert = build_transformer_model(config_path=config_path, segment_vocab_size=0)
        # Emission scores over the full BIO tag set (includes 'O').
        self.fc = nn.Linear(768, len(categories))
        self.crf = CRF(len(categories))
        self.eval()  # inference-only module; weights are loaded externally

    def forward(self, token_ids):
        hidden = self.bert([token_ids])      # [btz, seq_len, hdsz]
        emissions = self.fc(hidden)          # [btz, seq_len, tag_size]
        # Non-padding positions (token id 0 is padding).
        mask = (token_ids > 0).long()
        return emissions, mask

    def predict(self, token_ids):
        """Decode the best tag path for a batch of token ids."""
        with torch.no_grad():
            emissions, mask = self.forward(token_ids)
            return self.crf.decode(emissions, mask)  # [btz, seq_len]

    def predict_batch(self, token_list):
        """Run predict() on each element of token_list independently."""
        results = []
        for ids in token_list:
            results.append(self.predict(ids))
        return results


model = Model().to(device)


def trans_entity2tuple(scores):
    '''Convert decoded tag-id sequences into (sample_id, start, end, entity_type) tuples
    for metric computation / span extraction.

    :param scores: iterable over samples; each sample is a sequence of tag-id
        tensors (e.g. a [btz, seq_len] tensor of CRF-decoded label ids).
    :return: list of (sample_id, start_token, end_token, entity_label) tuples;
        start/end are inclusive token indices.
    '''
    batch_entity_ids = list()
    for sample_id, one_samp in enumerate(scores):
        entity_ids = []
        for pos, item in enumerate(one_samp):
            flag_tag = categories_id2label[item.item()]
            if flag_tag.startswith('B-'):  # start of a new entity
                entity_ids.append([sample_id, pos, pos, flag_tag[2:]])
            elif len(entity_ids) == 0:
                # 'I-'/'O' before any entity has started: nothing to extend.
                continue
            elif (len(entity_ids[-1]) > 0) and flag_tag.startswith('I-') and (flag_tag[2:] == entity_ids[-1][-1]):
                # Continuation of the current entity: push its end index forward.
                entity_ids[-1][-2] = pos
            elif len(entity_ids[-1]) > 0:
                # Entity interrupted (e.g. 'O'): append an empty sentinel so a
                # later stray 'I-' cannot attach to the closed entity.
                entity_ids.append([])

        # NOTE: the original reused the outer loop variable `i` here, shadowing
        # the sample index — renamed to avoid that trap.
        for entity in entity_ids:
            if entity:
                batch_entity_ids.append(tuple(entity))
    return batch_entity_ids


def fill_predict_result(predict_result: list, temp_token_list: list, temp_label_list: list):
    """Append one decoded entity ({"label", "text"}) to predict_result and return it.

    :param predict_result: accumulator list of {"label": ..., "text": ...} dicts (mutated).
    :param temp_token_list: token ids of one entity span, decoded via the tokenizer.
    :param temp_label_list: tag ids of the same span; all must map to a single label.
    :raises ValueError: if the span mixes more than one entity label.
    """
    temp = {"label": None, "text": None}
    label_list = [categories[i] for i in temp_label_list]
    label_list = [x.replace("B-", "").replace("I-", "") for x in label_list]
    # `assert` is stripped under `python -O`; validate explicitly instead.
    if len(set(label_list)) != 1:
        raise ValueError(f"get more than 1 label in predict result:{set(label_list)}")

    temp['label'] = label_list[0]
    temp["text"] = tokenizer.decode(temp_token_list)
    predict_result.append(temp)
    return predict_result


def get_chinese_label(en_label: str) -> Optional[str]:
    """Translate an English entity label to its Chinese display name.

    Returns None when the label is not in the mapping.
    """
    return {"BRANDMODEL": "型号"}.get(en_label)


def fill_outputs_batch(text_list: List[str], entity_lists: List[Tuple], tokens: List):
    """Build one {"label", "text"} result per input text from extracted entity spans.

    :param text_list:['得力(deli)12#订书机学生钉书机标准订书器_颜色随机1台_0308', '得力(deli)φ6mm可打10页手握式单孔打孔器打孔机_颜色随机0115']
    :param entity_lists: [(0, 1, 2, 'BRAND'), (1, 1, 2, 'BRAND')] — tuples of
        (text_index, start_token, end_token_inclusive, label).
    :param tokens: per-sample token-id sequences.
        NOTE(review): zipping entity_lists with tokens positionally assumes at
        most one entity per sample; multiple entities in one sample would
        misalign — confirm against the caller.
    :return: list aligned with text_list; slots with no entity keep label/text None.
    """
    # One fresh dict per slot — `[{...}] * n` would alias a single shared dict
    # across every slot, so mutating one result would corrupt all of them.
    results = [{"label": None, "text": None} for _ in text_list]

    for entity, token_ids in zip(entity_lists, tokens):
        temp = {"label": None, "text": None}
        text_index, start, end1, label = entity
        end = end1 + 1  # make the end index exclusive for slicing

        # (Renamed from `tokens`: the original rebound the parameter it was iterating.)
        span_ids = token_ids[start: end]
        token_text = tokenizer.decode(span_ids)
        if token_text:
            label = get_chinese_label(label)
            temp["label"] = label
            temp["text"] = token_text
        results[text_index] = temp

    return results


# Dataset wrapper
class MyDataset(ListDataset):
    """Wraps raw texts as samples for the DataLoader.

    Each sample is ``[text, [i, i, "BRANDMODEL"], ...]`` — the text with
    newlines removed, plus one single-position dummy span per newline-separated
    segment. At inference time the spans are ignored; only the text matters.
    """

    @staticmethod
    def load_data(texts: List[str]):
        samples = []
        for raw in texts:
            segments = raw.split('\n')
            sample = [''.join(segments)]
            sample.extend([idx, idx, "BRANDMODEL"] for idx in range(len(segments)))
            samples.append(sample)
        return samples


def collate_fn(batch):
    """Tokenize a batch of samples and build padded label/input tensors.

    :param batch: list of samples, each ``[text, (start, end, label), ...]``
        where start/end are inclusive character offsets into text.
    :return: (token_ids, label_ids) LongTensors on `device`, padded to the
        longest sequence in the batch.
    """
    batch_token_ids, batch_labels = [], []
    for d in batch:
        tokens = tokenizer.tokenize(d[0], maxlen=maxlen)
        # Map character offsets to token positions (rematch handles subwords).
        mapping = tokenizer.rematch(d[0], tokens)
        start_mapping = {j[0]: i for i, j in enumerate(mapping) if j}
        end_mapping = {j[-1]: i for i, j in enumerate(mapping) if j}
        token_ids = tokenizer.tokens_to_ids(tokens)
        # Renamed from `labels`: the original shadowed the module-level `labels` list.
        label_ids = np.zeros(len(token_ids))
        for start, end, label in d[1:]:
            # Spans falling outside the (possibly truncated) token range are dropped.
            if start in start_mapping and end in end_mapping:
                start = start_mapping[start]
                end = end_mapping[end]
                label_ids[start] = categories_label2id['B-' + label]
                label_ids[start + 1:end + 1] = categories_label2id['I-' + label]
        batch_token_ids.append(token_ids)
        batch_labels.append(label_ids)
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
    batch_labels = torch.tensor(sequence_padding(batch_labels), dtype=torch.long, device=device)
    return batch_token_ids, batch_labels


num_workers = 0


def inference_ner_batch(text_list1: List[str]):
    """Run NER over a list of texts; return one {"label", "text"} dict per text.

    :param text_list1: raw input strings.
    :return: list aligned with the input order; texts with no entity get
        {"label": None, "text": None}.
    """
    # encode text — spaces are replaced with '_' so the tokenizer keeps the positions.
    text_list = []
    for text in text_list1:
        text = text.replace(" ", "_")
        text_list.append(text)
    # shuffle MUST be False at inference: entity tuples carry positional sample
    # indices, so shuffling would scramble the text <-> prediction alignment.
    data_loader = DataLoader(MyDataset(text_list), batch_size=batch_size, shuffle=False,
                             collate_fn=collate_fn, num_workers=num_workers)

    # Accumulate predictions over ALL batches; the original assignment inside
    # the loop silently kept only the last batch's output.
    predict_output = []
    all_token_ids = []
    for batch_token_ids, batch_labels in data_loader:
        predict_output.extend(model.predict(batch_token_ids))
        all_token_ids.extend(batch_token_ids)

    entity_list1 = trans_entity2tuple(predict_output)
    entity_list = fill_outputs_batch(text_list, entity_list1, all_token_ids)

    return entity_list


if __name__ == '__main__':
    # Example inputs (kept for reference):
    # text_list = [
    #     "惠普(HP) CF360A 508A黑色原装硒鼓 （适用M553dn/M577dn/552dn） (6000页）",
    #     "南孚(NANFU)9V碱性电池1粒装 9v 适用于遥控玩具/烟雾报警器/无线麦克风/万用表/话筒/遥控器等 6LR61",
    #     "晨光(M&G)文具K35/0.5mm黑色中性笔 按动笔 经典子弹头签字笔 办公用水笔 12支/盒",
    # ]
    # Read one SKU name per line; drop blank lines.
    with open("./sku_name.txt", "r", encoding="utf-8") as f:
        text_list = f.readlines()
    text_list = [x.strip() for x in text_list if x.strip()]
    # Load the fine-tuned weights (the module-level model is untrained until here).
    model.load_weights("./model/extract_brandmodel_model.pt")

    model.eval()
    time.sleep(1)
    # Time single-text inference over the whole file (one call per line).
    start = time.time()
    for line in text_list:
        result = inference_ner_batch([line])
        print(line, result[0])
    end = time.time()
    print("use time", end - start)

