import os

import pickle
import numpy as np
import torch
from torch.utils.data import DataLoader
from transformers import BertModel, BertConfig, BertTokenizer

from lsptrain.nlp_ner.model import NERDataSet, Model
from lsptrain.nlp_ner.train_scripts import rematch_tokenizer, sequence_padding


class TextNerPredictor(object):
    """Load a trained BIO-tagging NER model and decode entity spans from text.

    Label-id convention (implied by the decode loop in ``predict``):
    id 0 is the outside/"O" tag, odd ids are ``B-`` tags and even non-zero
    ids are ``I-`` tags — assumed from the ``% 2`` tests; TODO confirm
    against the training pipeline that produced the checkpoint.
    """

    def __init__(self, save_dir: str = "checkpoint",
                 pretrained_model: str = "hfl/chinese-roberta-wwm-ext", device: str = "cuda:0",
                 model_file_name: str = "abc.pickle", max_length: int = 64
                 ):
        """Build the tokenizer and restore model weights from the checkpoint.

        Args:
            save_dir: directory containing the pickled checkpoint.
            pretrained_model: HF hub name or local path of the BERT backbone.
            device: CUDA device string; used only when CUDA is available.
            model_file_name: checkpoint file name inside ``save_dir``.
            max_length: kept for interface compatibility (unused here).
        """
        # Category vocabulary and both label<->id mappings; filled in init().
        self.categories: list = []
        self.categories_id2label: dict = {}
        self.categories_label2id: dict = {}

        self.model = None
        # Default to CPU; init() upgrades to `device` when CUDA is available.
        self.device = torch.device("cpu")
        self.tokenizer = BertTokenizer.from_pretrained(pretrained_model)
        self.init(pretrained_model, save_dir, model_file_name, device)

    def init(self, pretrained_model: str, save_dir: str, model_file_name: str, device: str = "cuda:0"):
        """Restore label mappings and model weights, then move model to device.

        Raises:
            FileNotFoundError: if the checkpoint file does not exist.
        """
        pkl_file = os.path.join(save_dir, model_file_name)
        if not os.path.exists(pkl_file):
            raise FileNotFoundError(f"pretrained file does not exist\nfile path: [{pkl_file}]")
        # NOTE(security): pickle.load executes arbitrary code — only load
        # checkpoints you produced yourself / trust completely.
        with open(pkl_file, "rb") as f:
            pickle_dict = pickle.load(f)
        self.categories = pickle_dict.get("categories", [])
        self.categories_id2label = pickle_dict.get("categories_id2label", {})
        self.categories_label2id = pickle_dict.get("categories_label2id", {})

        # The checkpoint key is spelled "stat_dict"; keep it byte-identical
        # for compatibility with existing checkpoint files.
        state_dict = pickle_dict.get("stat_dict", {})
        bert_model = BertModel.from_pretrained(pretrained_model)
        config = BertConfig.from_pretrained(pretrained_model)
        self.model = Model(bert_model, config, self.categories)
        self.model.load_state_dict(state_dict)
        if torch.cuda.is_available():
            self.device = torch.device(device)
        self.model.to(self.device)
        self.model.eval()
        print("using device=", self.device)

    def collate_fn(self, batch):
        """Tokenize a batch of (text, ...) samples into padded long tensors.

        Returns:
            (token_ids, labels) — labels are all-zero placeholders since
            inference has no gold labels; they only keep the DataLoader
            contract of the training pipeline.
        """
        batch_token_ids, batch_labels = [], []
        for sample in batch:
            tokens = self.tokenizer.tokenize(sample[0])  # noqa
            token_ids = self.tokenizer.encode(tokens)  # noqa
            batch_token_ids.append(token_ids)
            batch_labels.append(np.zeros(len(token_ids)))

        batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=self.device)
        batch_labels = torch.tensor(sequence_padding(batch_labels), dtype=torch.long, device=self.device)

        return batch_token_ids, batch_labels

    def fill_predict_result(self, predict_result: list, temp_token_list: list, temp_label_list: list):
        """Decode one accumulated entity span and append it to predict_result.

        Strips the B-/I- prefixes so a span tagged (B-X, I-X, ...) collapses
        to the single label "X"; asserts the span is tag-homogeneous.
        """
        label_list = [self.categories[i] for i in temp_label_list]
        label_list = [x.replace("B-", "").replace("I-", "") for x in label_list]
        assert len(set(label_list)) == 1, f"get more than 1 label in predict result:{set(label_list)}"

        predict_result.append({
            "label": label_list[0],
            "text": self.tokenizer.decode(temp_token_list),
        })
        return predict_result

    def predict(self, string: str):
        """Run NER over `string`; return a list of {"label", "text"} dicts."""
        predict_result = []
        predict_dataset = NERDataSet([string])
        predict_dataloader = DataLoader(predict_dataset, batch_size=1, shuffle=False, collate_fn=self.collate_fn)

        with torch.no_grad():  # inference only — skip autograd bookkeeping
            for predict_input, label in predict_dataloader:
                # Output indexing depends on the project's Model.forward
                # contract — assumed to yield one label id per token for the
                # single batch item; TODO confirm against Model.
                predict_output = self.model(predict_input)[0][0]
                batch_token_ids_list = predict_input.int().tolist()
                predict_label_list = predict_output.int().tolist()
                for token_ids, predict_label in zip(batch_token_ids_list, predict_label_list):
                    temp_token_list = []
                    temp_label_list = []
                    for temp_token, temp_label in zip(token_ids, predict_label):
                        if temp_label == 0:
                            # Outside tag: flush the pending entity, if any.
                            if temp_token_list:
                                self.fill_predict_result(predict_result, temp_token_list, temp_label_list)
                                temp_token_list = []
                                temp_label_list = []
                            continue
                        if temp_label % 2 == 1 and temp_token_list:
                            # B- tag starts a new entity: flush the pending one.
                            self.fill_predict_result(predict_result, temp_token_list, temp_label_list)
                            temp_token_list = []
                            temp_label_list = []
                        # BUGFIX: the original appended I-tagged (even-label)
                        # tokens a second time here, duplicating characters in
                        # every decoded multi-token entity.
                        temp_token_list.append(temp_token)
                        temp_label_list.append(temp_label)
                    if temp_token_list:
                        self.fill_predict_result(predict_result, temp_token_list, temp_label_list)
        return predict_result


# if __name__ == '__main__':
#     save_path = "checkpoint"
#     pretrained_model = r"D:/codes/nlp_about/pretrained_model/hfl_chinese-roberta-wwm-ext"
#     device = "cuda:0"
#     model_file_name = "txt_ner_model_2_0.9479071291900103.pickle"
#     predictor = TextNerPredictor(save_path, pretrained_model, device, model_file_name)
#     # 得力（deli） 订书钉通用型标准型学生用/办公用厚层订书机订书针 0010订书钉10#（10盒）
#     while True:
#         string_ = input("请输入文本:")
#         predict_result = predictor.predict(string_)
#         print(predict_result)
