# encoding: utf-8
import os
import json
import time

import torch
# from docx import Document
# from pypinyin import lazy_pinyin
from transformers import BertTokenizer, BertForTokenClassification, BertConfig

from .utils import text2token, token2ids, replace_para, insert_whitespace
from .model import PinyinBertForMaskedLM
from ....config.base import ERROR_CORRECTION_MODEL_DEVICE, MODEL_BASE_PATH

# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Device used for all model inference; value comes from project config
# (e.g. "cpu" or "cuda:0").
device = torch.device(ERROR_CORRECTION_MODEL_DEVICE)

# Root directory holding the tokenizer files, pinyin vocabulary, and
# per-domain model checkpoints for this corrector.
model_path_base = os.path.join(MODEL_BASE_PATH, 'CQU_ChineseSpellCorrect')


class CQUErrorCorrectModel:
    """Chinese spelling-error corrector.

    Combines two models loaded from ``model_path_base``:
      * a BERT token-classification head that flags likely-erroneous tokens;
      * a pinyin-aware BERT masked LM that proposes replacement characters.
    """

    def __init__(self):
        # Load tokenizer shipped alongside the checkpoints.
        self.tokenizer = BertTokenizer.from_pretrained(os.path.join(model_path_base, "bert-base-chinese"))

        # Load BERT config from the raw JSON file.
        with open(os.path.join(model_path_base, "bert-base-chinese/config.json"), "r", encoding="utf-8") as f:
            config = json.load(f)
        config = BertConfig(**config)

        self.detect_model = BertForTokenClassification(config=config).to(device)
        self.correct_model = PinyinBertForMaskedLM(config=config).to(device)

        # Load model weights. Domains: norm=general, law=legal, med=medical, odw.
        self.load_param("norm")

        # Build the pinyin -> id vocabulary: one pinyin token per line,
        # line number is the id.
        self.pinyin_vocab = {}
        with open(os.path.join(model_path_base, "pinyin_vocab.txt"), "r", encoding="utf-8") as f:
            for i, line in enumerate(f):
                self.pinyin_vocab[line.strip("\n")] = i

    def load_param(self, model_name):
        """Load detector/corrector weights for the named domain and switch
        both models to eval mode.

        :param model_name: checkpoint subdirectory, e.g. "norm", "law", "med".
        """
        model_path = os.path.join(model_path_base, model_name)
        # NOTE(review): torch.load unpickles arbitrary objects — only load
        # trusted checkpoint files (weights_only=True would be safer if the
        # installed torch version supports it).
        detect_state_dict = torch.load(f"{model_path}/detect_model.bin", map_location=device)
        correct_state_dict = torch.load(f"{model_path}/correct_model.bin", map_location=device)
        self.detect_model.load_state_dict(detect_state_dict)
        self.correct_model.load_state_dict(correct_state_dict)
        self.detect_model.eval()
        self.correct_model.eval()

    def predict(self, text, threshold_p=0.8, model_name="通用"):
        """Correct a single text.

        :param text: input string.
        :param threshold_p: probability above which a token is treated as an error.
        :param model_name: unused; kept for backward compatibility with callers.
        :return: (corrected_text, probs) where probs maps each input token
            (duplicates disambiguated with '#' prefixes) to its error probability.
        """
        # Pure inference — no gradients needed; avoids building autograd graphs.
        with torch.no_grad():
            inputs = self.tokenizer(text, return_tensors='pt').to(device)
            error_label, prob = detect(self.detect_model, self.tokenizer, [text], threshold_p, show_error=False)
            error_label = error_label.unsqueeze(-1).to(device)
            text_token, pinyin_token = text2token(text, self.tokenizer)
            # Pad pinyin ids with 0 at both ends for the [CLS]/[SEP] positions.
            pinyin_ids = torch.tensor([0] + token2ids(pinyin_token, self.pinyin_vocab) + [0], device=device)
            output = self.correct_model(**inputs, pinyin_ids=pinyin_ids, error_prob=error_label)

        predict_ids_for_output = output[0].argmax(-1)[0]
        result = ""
        # Rebuild the corrected string; keep the original token wherever the
        # model predicts [UNK].
        for token, id_ in zip(text_token, predict_ids_for_output[1:-1]):
            if self.tokenizer.convert_ids_to_tokens(id_.item()) == "[UNK]":
                result += token
            else:
                result += self.tokenizer.convert_ids_to_tokens(id_.item()).replace("##", "")
        result = insert_whitespace(text, result)
        # Map each token to its error probability; repeated tokens get '#'
        # prefixes so they remain distinct dict keys.
        probs = {}
        for a, b in zip(text_token, prob[0][1:-1]):
            key_a = a
            while key_a in probs:
                key_a = "#" + key_a
            probs[key_a] = b
        return result, probs

    def correct_batch(self, texts, threshold_p=0.8):
        """Correct a batch of texts (uniform service interface).

        :param texts: iterable of input strings.
        :param threshold_p: error-probability threshold passed to predict().
        :return: list of {'target': corrected_text}; on failure the original
            text is returned unchanged (best-effort behavior preserved).
        """
        batch_result = []
        for text in texts:
            try:
                result, probs = self.predict(text, threshold_p)
            except Exception:
                # Narrowed from a bare except so KeyboardInterrupt/SystemExit
                # still propagate; keep best-effort fallback to the input text.
                print('模型识别错误', text)
                result = text
            batch_result.append({'target': result})

        return batch_result


def detect(model, tokenizer, text, threshold_p, show_error=False):
    """Run the token-level error detector over a batch of texts.

    :param model: token-classification model; class index 1 is the "error" label.
    :param tokenizer: tokenizer producing a padded batch of tensors.
    :param text: list of input strings.
    :param threshold_p: probability above which a token is flagged as an error.
    :param show_error: unused; kept for backward compatibility.
    :return: (error_label, error_prob) — an int 0/1 tensor and a float tensor,
        one value per tokenized position (special tokens included — callers
        slice with [1:-1]).
    """
    model_device = model.device
    inputs = tokenizer(text, return_tensors="pt", padding=True).to(model_device)

    # Pure inference — no gradients needed.
    with torch.no_grad():
        logits = model(**inputs).logits

    # Probability of the "error" class (label index 1) for every token.
    error_prob = torch.nn.functional.softmax(logits, dim=-1)[:, :, 1]
    # Binarize with the caller-supplied threshold.
    error_label = (error_prob > threshold_p).int()
    return error_label, error_prob