#! -*- coding:utf-8 -*-
# bert+crf用来做实体识别
# 数据集：http://s3.bmio.net/kashgari/china-people-daily-ner-corpus.tar.gz
# [valid_f1]  token_level: 97.06； entity_level: 95.90
# bert4torch==0.5.1
# 尝试批量推理,没有成功

import os
import time
import torch

from typing import List, Tuple, Dict

import numpy as np
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset

from bert4torch.snippets import sequence_padding, ListDataset
from bert4torch.layers import CRF
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel

maxlen = 128
batch_size = 160
bio_tags = 6  # number of BIO tags used by the first-stage CRF
categories = ["BRAND", "MODEL", "NAME", "COLOR", "SPECS", "UNIT"]

# Full BIO label set: 'O' plus B-/I- variants of every category.
labels = ['O']
for label in categories:
    labels.append("B-" + label)
    labels.append("I-" + label)

# BUGFIX: both maps must be built from `labels` (the BIO tag strings), not
# from `categories`. trans_entity2tuple2() looks tag ids up in
# categories_id2label and checks .startswith('B-') / .startswith('I-'),
# which values drawn from `categories` could never satisfy. The fixed maps
# are exactly what the original reference comment in this file described:
#   categories_id2label = {0: 'O', 1: 'B-BRAND', 2: 'I-BRAND',
#                          3: 'B-MODEL', ..., 11: 'B-UNIT', 12: 'I-UNIT'}
#   categories_label2id  = inverse mapping of the above.
categories_id2label = {i: k for i, k in enumerate(labels)}
categories_label2id = {k: i for i, k in enumerate(labels)}

device = 'cuda:0' if torch.cuda.is_available() else 'cpu'

# pretrained_model = r"G:/nlp_about/pretrained_models/hfl_chinese-electra-180g-large"
pretrained_model = r"G:/nlp_about/pretrained_models/hfl-chinese-roberta-wwm-ext"
# Build the tokenizer from the pretrained model's local files.
dict_path = os.path.join(pretrained_model, "vocab.txt")
config_path = os.path.join(pretrained_model, "config.json")
checkpoint_path = os.path.join(pretrained_model, "pytorch_model.bin")

tokenizer = Tokenizer(dict_path, do_lower_case=True)


def collate_fn(batch):
    """Collate raw samples into padded tensors for the two-stage NER model.

    Each sample is [text, (start, end, label), ...] with character-level
    span boundaries. Returns:
      inputs  = [token_ids, entity_spans]   (entity_spans: [btz, n, 2])
      targets = [BIO label ids, entity-type ids]
    """
    all_token_ids, all_bio, all_spans, all_span_labels = [], [], [], []
    for sample in batch:
        text = sample[0]
        tokens = tokenizer.tokenize(text, maxlen=maxlen)
        mapping = tokenizer.rematch(text, tokens)
        # Character position -> token index, for span starts and ends.
        char2tok_start = {span[0]: idx for idx, span in enumerate(mapping) if span}
        char2tok_end = {span[-1]: idx for idx, span in enumerate(mapping) if span}
        token_ids = tokenizer.tokens_to_ids(tokens)

        bio = np.zeros(len(token_ids))
        spans, span_labels = [], []
        for char_start, char_end, label in sample[1:]:
            # Keep only entities whose boundaries survived tokenization/truncation.
            if char_start not in char2tok_start or char_end not in char2tok_end:
                continue
            tok_start = char2tok_start[char_start]
            tok_end = char2tok_end[char_end]
            bio[tok_start] = 1                  # mark B
            bio[tok_start + 1:tok_end + 1] = 2  # mark I
            spans.append([tok_start, tok_end])
            span_labels.append(categories.index(label) + 1)

        if not spans:  # every sample needs at least one span
            spans, span_labels = [[0, 0]], [0]  # pad with a dummy zero span

        all_token_ids.append(token_ids)
        all_bio.append(bio)
        all_spans.append(spans)
        all_span_labels.append(span_labels)

    def _to_tensor(seqs):
        return torch.tensor(sequence_padding(seqs), dtype=torch.long, device=device)

    batch_token_ids = _to_tensor(all_token_ids)
    batch_labels = _to_tensor(all_bio)
    batch_entity_ids = _to_tensor(all_spans)          # [btz, n_entities, start/end]
    batch_entity_labels = _to_tensor(all_span_labels)  # [btz, n_entities]
    return [batch_token_ids, batch_entity_ids], [batch_labels, batch_entity_labels]


class MyDataset(ListDataset):
    @staticmethod
    def load_data(lines):
        """Turn each non-empty line into [text, [i, i, 'NAME'], ...]:
        every character becomes its own single-character 'NAME' span."""
        samples = []
        for line in lines:
            if not line:
                continue
            entry = ['']
            for pos, ch in enumerate(line):
                entry[0] += ch
                entry.append([pos, pos, "NAME"])
            samples.append(entry)
        return samples


class MyDataset2(Dataset):
    """Plain torch Dataset variant of MyDataset (no bert4torch base class)."""

    def __init__(self, lines):
        self.data = self.load_data(lines)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, item):
        return self.data[item]

    @staticmethod
    def load_data(lines):
        """Turn each non-empty line into [text, [i, i, 'NAME'], ...]:
        every character becomes its own single-character 'NAME' span."""
        samples = []
        for line in lines:
            if not line:
                continue
            entry = ['']
            for pos, ch in enumerate(line):
                entry[0] += ch
                entry.append([pos, pos, "NAME"])
            samples.append(entry)
        return samples


# Model structure on top of BERT: a two-stage (cascade) NER model.
# Stage 1 tags tokens with a BIO CRF; stage 2 classifies each span
# into an entity type.
class Model(BaseModel):
    def __init__(self):
        super().__init__()
        self.bert = build_transformer_model(config_path=config_path, checkpoint_path=checkpoint_path, segment_vocab_size=0)
        self.dense1 = nn.Linear(768, bio_tags)  # stage-1 head: per-token CRF emission scores
        self.dense2 = nn.Linear(768, len(categories) + 1)  # stage-2 head; +1 for the padding class
        self.crf = CRF(bio_tags)

    def forward(self, *inputs):
        """Training forward pass.

        inputs[0]: token_ids tensor [btz, seq_len];
        inputs[1]: entity_ids tensor [btz, n_entities, 2] (start/end token
        indices of the gold spans, as built by collate_fn).
        Returns (emission_score, attention_mask, entity_logit).
        """
        # Stage-1 output: CRF emission scores over the token sequence.
        token_ids, entity_ids = inputs[0], inputs[1]
        last_hidden_state = self.bert([token_ids])  # [btz, seq_len, hdsz]
        emission_score = self.dense1(last_hidden_state)  # [btz, seq_len, tag_size]
        attention_mask = token_ids.gt(0)  # non-zero ids are real tokens

        # Stage-2 output: gather start/end hidden states for each gold span,
        # mean-pool them, and classify the span type.
        btz, entity_count, _ = entity_ids.shape
        hidden_size = last_hidden_state.shape[-1]
        entity_ids = entity_ids.reshape(btz, -1, 1).repeat(1, 1, hidden_size)
        entity_states = torch.gather(last_hidden_state, dim=1, index=entity_ids).reshape(btz, entity_count, -1, hidden_size)
        entity_states = torch.mean(entity_states, dim=2)  # mean of the start/end hidden states
        entity_logit = self.dense2(entity_states)  # [btz, n_entities, n_types]

        return emission_score, attention_mask, entity_logit

    def predict2(self, token_ids):
        """Two-stage inference: CRF decode, span extraction, span typing.

        NOTE(review): unlike forward(), this passes `token_ids` to
        self.bert unchanged and builds the mask from `token_ids[0]`, so it
        appears to expect a list/tuple whose first element is the id
        tensor — confirm against callers.
        Returns (best_path, entity_tuples).
        """
        self.eval()
        with torch.no_grad():
            # Stage 1: decode the best BIO path with the CRF.
            last_hidden_state = self.bert(token_ids)  # [btz, seq_len, hdsz]
            emission_score = self.dense1(last_hidden_state)  # [btz, seq_len, tag_size]
            attention_mask = token_ids[0].gt(0)
            best_path = self.crf.decode(emission_score, attention_mask)  # [btz, seq_len]

            # Stage 2: turn the predicted path into spans
            # (tag 1 = B opens a span, tag 2 = I extends it), then classify.
            batch_entity_ids = []
            for one_samp in best_path:
                entity_ids = []
                for j, item in enumerate(one_samp):
                    if item.item() == 1:  # B: open a new span
                        entity_ids.append([j, j])
                    elif len(entity_ids) == 0:
                        continue
                    elif (len(entity_ids[-1]) > 0) and (item.item() == 2):  # I: extend the open span
                        entity_ids[-1][-1] = j
                    elif len(entity_ids[-1]) > 0:
                        entity_ids.append([])  # close the current span
                if not entity_ids:  # need at least one span per sample
                    entity_ids.append([0, 0])  # pad with a dummy zero span
                batch_entity_ids.append([i for i in entity_ids if i])
            batch_entity_ids = torch.tensor(sequence_padding(batch_entity_ids), dtype=torch.long, device=device)  # [btz, n_entities, start/end]

            btz, entity_count, _ = batch_entity_ids.shape
            hidden_size = last_hidden_state.shape[-1]
            gather_index = batch_entity_ids.reshape(btz, -1, 1).repeat(1, 1, hidden_size)
            entity_states = torch.gather(last_hidden_state, dim=1, index=gather_index).reshape(btz, entity_count, -1, hidden_size)
            entity_states = torch.mean(entity_states, dim=2)  # mean of the start/end hidden states
            entity_logit = self.dense2(entity_states)  # [btz, n_entities, n_types]
            entity_pred = torch.argmax(entity_logit, dim=-1)  # [btz, n_entities]

            # Each element is a (sample_id, start, end, type_id) tuple.
            entity_tulpe = trans_entity2tuple(batch_entity_ids, entity_pred)
        return best_path, entity_tulpe



# Instantiate the model and move it to the selected device.
model = Model().to(device)


def trans_entity2tuple(entity_ids, entity_labels):
    """Convert span/label tensors into a set of
    (sample_id, start, end, type_id) tuples for metric computation.
    Spans where start or end is 0 (zero padding) are skipped.
    """
    result = set()
    for sample_id, spans in enumerate(entity_ids):
        for k, span in enumerate(spans):
            start, end = span[0].item(), span[1].item()
            if start != 0 and end != 0:
                result.add((sample_id, start, end, entity_labels[sample_id, k].item()))
    return result


def trans_entity2tuple2(scores):
    """Decode per-token tag-id sequences into a flat list of
    (sample_id, start, end, type_name) tuples for metric computation.
    Tag ids are resolved through the module-level categories_id2label map.
    """
    results = []
    for sample_id, tag_seq in enumerate(scores):
        # Each entry is [sample_id, start, end, type_name]; an empty list
        # marks a closed entity so a stray I-tag can't reopen it.
        partial = []
        for pos, tag_id in enumerate(tag_seq):
            tag = categories_id2label[tag_id.item()]
            if tag.startswith('B-'):  # open a new entity
                partial.append([sample_id, pos, pos, tag[2:]])
            elif not partial:
                continue
            elif partial[-1] and tag.startswith('I-') and tag[2:] == partial[-1][-1]:
                partial[-1][-2] = pos  # extend the open entity's end
            elif partial[-1]:
                partial.append([])  # close the open entity

        results.extend(tuple(entity) for entity in partial if entity)
    return results


def fill_predict_result(predict_result: list, temp_token_list: list, temp_label_list: list):
    """Append one {'label', 'text'} entry built from token ids and label ids.

    All label ids must resolve to the same category; otherwise an
    AssertionError is raised. Returns the mutated predict_result list.
    """
    names = [categories[i] for i in temp_label_list]
    names = [n.replace("B-", "").replace("I-", "") for n in names]
    assert len(set(names)) == 1, f"get more than 1 label in predict result:{set(names)}"

    entry = {"label": names[0], "text": tokenizer.decode(temp_token_list)}
    predict_result.append(entry)
    return predict_result


def fill_outputs_back(text: str, entity_list: List[Tuple[int, int, int, str]], token_offsets: List[List[List[int]]]):
    """Map token-level entity spans back to character-level spans of `text`.

    Underscores in `text` are rendered as spaces. `token_offsets[0]` maps
    each token index to its character positions. Entities whose start falls
    in the second half of the offset table are dropped (presumably the
    duplicated second segment of a paired encoding — TODO confirm).
    """
    plain_text = text.replace("_", " ")
    offsets = token_offsets[0]
    half = int(len(offsets) / 2)
    outputs = []
    for entity in entity_list:
        tok_start, tok_end, label = entity[1], entity[2], entity[3]
        if tok_start >= half:
            continue

        char_ids = []
        for tok in range(tok_start, tok_end + 1):
            char_ids.extend(offsets[tok])

        if not char_ids:
            continue
        outputs.append({
            "start": min(char_ids),
            "end": max(char_ids),
            "label": label,
            "text": "".join(plain_text[c] for c in char_ids),
        })
    return outputs


def fill_outputs(text: str, entity_list: List[Tuple[int, int, int, str]]):
    """Map token-level entity spans back to character spans of `text`,
    re-encoding the text to obtain fresh token offsets.

    Underscores in `text` are rendered as spaces. Entities starting past
    the text length, or before the last accepted start, are skipped.
    """
    plain_text = text.replace("_", " ")
    encoded = tokenizer.encode(text, text, return_offsets=True)
    outputs = []
    last_start = -1
    for entity in entity_list:
        tok_start, label = entity[1], entity[3]
        if tok_start >= int(len(text)):
            continue
        if tok_start < last_start:
            continue
        last_start = max(last_start, tok_start)

        char_ids = []
        for tok in range(tok_start, entity[2] + 1):
            char_ids.extend(encoded[2][tok])

        if char_ids:
            outputs.append({
                "start": min(char_ids),
                "end": max(char_ids),
                "label": label,
                "text": "".join(plain_text[c] for c in char_ids),
            })
    return outputs


def build_entity_list(batch_entity_list, entity_list):
    """Group flat (sample_id, start, end, label) tuples by sample_id.

    Appends one list per sample to `batch_entity_list` (mutated in place)
    and returns it. Assumes `entity_list` is ordered by sample_id.

    Fixes two bugs in the previous version:
      * the entity that triggered a group switch was dropped (the `else`
        branch reset the temp list without keeping the current entity);
      * the final group was never appended after the loop ended.
    """
    current_group = []
    current_id = None
    for entity in entity_list:
        sample_id = entity[0]
        if current_id is None:
            current_id = sample_id
        if sample_id == current_id:
            current_group.append(entity)
        else:
            batch_entity_list.append(current_group)
            current_group = [entity]  # keep the entity that starts the new group
            current_id = sample_id
    if current_group:  # flush the last group
        batch_entity_list.append(current_group)
    return batch_entity_list


def inference_ner_batch(text_list: List[str]):
    """Batch NER inference: returns entity tuples grouped per input text.

    Fixes vs the previous version:
      * `trans_entity2tuple2` was called on `best_path`, an undefined name —
        it must receive the CRF decode output;
      * `model.predict` mirrors forward() and yields three values
        (emission_score, attention_mask, entity_logit), but was unpacked
        into two, which would raise at runtime;
      * the function returned the last batch's flat `entity_list` instead
        of the accumulated `batch_entity_list`.
    """
    batch_entity_list = []
    my_dataset = MyDataset(text_list)
    my_dataloader = DataLoader(my_dataset, batch_size=2, collate_fn=collate_fn)
    for batch_inputs, _batch_targets in my_dataloader:
        # Stage 1 only: decode the BIO path; the span-type logits are unused here.
        emission_score, attention_mask, _entity_logit = model.predict(batch_inputs)
        best_path = model.crf.decode(emission_score, attention_mask)
        entity_list = trans_entity2tuple2(best_path)
        build_entity_list(batch_entity_list, entity_list)

    return batch_entity_list


if __name__ == '__main__':
    text = "得力0010订书钉10#(1000枚/盒)10盒/包(单位:包)"
    text2 = "恒夫曼 M14/35mm 内六角螺栓 DIN912 12.9级  全牙 黑色"
    # '_' stands in for spaces; fill_outputs/fill_outputs_back map it back to ' '.
    text = text.replace(" ", "_")
    text2 = text2.replace(" ", "_")
    model.load_weights("./bert_cascade_model/best_model.pt")

    model.eval()
    time.sleep(1)
    _start = time.time()
    result = inference_ner_batch([text, text2])
    _end = time.time()
    print("use time", _end - _start)
    # 0.12297940254211426
    print(result)
    # [[{'start': 0, 'end': 1, 'label': 'BRAND', 'text': '得力'}, {'start': 19, 'end': 19, 'label': 'UNIT', 'text': '盒'},
    #   {'start': 25, 'end': 25, 'label': 'UNIT', 'text': '包'}, {'start': 30, 'end': 30, 'label': 'UNIT', 'text': '包'}],
    # [{'start': 0, 'end': 2, 'label': 'BRAND', 'text': '恒夫曼'}, {'start': 4, 'end': 11, 'label': 'MODEL', 'text': 'M14/35mm'}]]
