import os.path

import numpy as np
import torch
import torch.nn as nn

from common.ner_tools import SeqEntityScore, get_entities
from model_arch.embedding.embedding_model import BertEmbeddingModel
from torch.nn import CrossEntropyLoss
from loguru import logger


def get_model():
    """Return the model wrapper class for this architecture (factory hook)."""
    return ModelWrapper


class ModelWrapper(nn.Module):
    """BERT backbone + softmax head wrapper for sequence labeling (NER).

    Combines a ``BertEmbeddingModel`` backbone with a ``BertSoftMax``
    classification head and provides evaluate/predict helpers plus
    save/load in three modes: "all" (whole wrapper), "embedding"
    (backbone only) and "upper" (head only).
    """

    def __init__(self, model_config):
        super(ModelWrapper, self).__init__()
        self.embedding_model = BertEmbeddingModel(model_config.bert_model_path)
        # Expose the loaded BERT config so the head can read hidden_size etc.
        setattr(model_config, 'bert_config', self.embedding_model.bert_config)
        self.model = BertSoftMax(model_config)
        self.model_config = model_config

    def forward(self, input_ids, attention_mask=None, token_type_ids=None, labels=None):
        """Run the backbone then the classification head.

        See https://huggingface.co/docs/transformers/model_doc/bert#bertmodel
        Returns ``[loss_or_None, logits, (hidden_states), (attentions)]``.
        """
        embed_outputs = self.embedding_model(input_ids=input_ids, attention_mask=attention_mask,
                                             token_type_ids=token_type_ids)
        # embed_outputs: last_hidden_state, pooler_output, (hidden_states), (attentions)
        outputs = self.model(embed_outputs, attention_mask, labels=labels)
        return outputs  # (loss), logits, (hidden_states), (attentions)

    def evaluate(self, dev_dataloader):
        """Compute mean dev loss and entity-level metrics.

        Each batch is expected as (input_ids, attention_mask, token_type_ids,
        labels, input_lens) — TODO confirm against the dataloader builder.

        Returns
        -------
        (eval_loss, eval_info) : mean loss over batches and the metric summary.
        """
        metric = SeqEntityScore(self.model_config.id2label, markup=self.model_config.markup)
        eval_loss = 0.0
        nb_eval_steps = 0
        self.eval()  # fixed: was redundantly re-entered on every batch inside the loop
        for step, batch in enumerate(dev_dataloader):
            batch = tuple(t.to(self.model_config.device) for t in batch)
            with torch.no_grad():
                inputs = {"input_ids": batch[0], "token_type_ids": batch[2], "attention_mask": batch[1],
                          "labels": batch[3]}
                outputs = self.forward(**inputs)
            tmp_eval_loss, logits = outputs[:2]
            eval_loss += tmp_eval_loss.item()
            nb_eval_steps += 1
            preds = np.argmax(logits.cpu().numpy(), axis=2).tolist()
            out_label_ids = inputs['labels'].cpu().numpy().tolist()
            input_lens = batch[4].cpu().numpy().tolist()
            for i, label in enumerate(out_label_ids):
                temp_1 = []  # gold label strings
                # predicted label ids; NOTE(review): ids are passed unmapped —
                # presumably SeqEntityScore maps them through id2label; confirm.
                temp_2 = []
                for j, m in enumerate(label):
                    if j == 0:
                        continue  # skip the [CLS] position
                    elif j == input_lens[i] - 1:
                        # last real position is [SEP]: flush this sequence
                        metric.update(pred_paths=[temp_2], label_paths=[temp_1])
                        break
                    else:
                        temp_1.append(self.model_config.id2label[out_label_ids[i][j]])
                        temp_2.append(preds[i][j])

        # fixed: guard against an empty dataloader (was a ZeroDivisionError)
        eval_loss = eval_loss / max(nb_eval_steps, 1)
        eval_info, entity_info = metric.result()

        return eval_loss, eval_info

    def predict(self, test_dataloader, tokenizer):
        """Decode entities for each example.

        test_dataloader must use batch_size=1.  Returns a list of dicts:
        ``{"text": ..., "entities": [[label, start, end, entity], ...]}``.
        """
        self.to(self.model_config.device)
        self.eval()  # fixed: was redundantly re-entered on every batch inside the loop
        results = []
        for batch in test_dataloader:
            batch = tuple(t.to(self.model_config.device) for t in batch)
            with torch.no_grad():
                inputs = {"input_ids": batch[0], "attention_mask": batch[1], "token_type_ids": batch[2], "labels": None}
                outputs = self.forward(**inputs)  # loss=None, logits, (hidden_states), (attentions)
            logits = outputs[1]
            preds = logits.detach().cpu().numpy()
            preds = np.argmax(preds, axis=2).tolist()
            preds = preds[0][1:-1]  # strip [CLS]XXXX[SEP]
            label_entities = get_entities(preds, self.model_config.id2label, self.model_config.markup)
            ori_text = tokenizer.decode(batch[0][0][1:-1]).split(" ")
            all_entities = []
            for item in label_entities:
                label, start, end = item
                entity = "".join(ori_text[start:end + 1])
                all_entities.append([label, start, end, entity])

            json_d = {}
            json_d['text'] = "".join(ori_text)
            json_d['entities'] = all_entities
            results.append(json_d)
        return results

    @staticmethod
    def _ensure_parent_dir(save_name):
        """Create the parent directory of save_name if it does not exist."""
        save_path = os.path.dirname(save_name)
        # fixed: os.makedirs("") raised FileNotFoundError when save_name
        # had no directory component; also deduplicates the old 3x copies.
        if save_path and not os.path.exists(save_path):
            os.makedirs(save_path)
            logger.warning("{} not exists, create it!".format(save_path))

    def _save_embedding_model(self, save_name):
        """Save only the embedding backbone's state dict to save_name."""
        self._ensure_parent_dir(save_name)
        torch.save(self.embedding_model.state_dict(), save_name)

    def _save_upper_model(self, save_name):
        """Save only the classification head's state dict to save_name."""
        self._ensure_parent_dir(save_name)
        torch.save(self.model.state_dict(), save_name)

    def save_model(self, save_full_name, mode):
        """Save weights; ``mode`` selects the part: "all", "embedding" or "upper"."""
        assert mode in ["all", "embedding", "upper"]
        if mode == "all":
            self._ensure_parent_dir(save_full_name)
            torch.save(self.state_dict(), save_full_name)
        elif mode == "embedding":
            # fixed: "embedding"/"upper" previously raised NotImplementedError
            # even though the private helpers below already implemented them.
            self._save_embedding_model(save_full_name)
        else:
            self._save_upper_model(save_full_name)
        logger.info("save model to {}, mode={}".format(save_full_name, mode))

    def _load_embedding_model(self, file_path):
        """Load only the embedding backbone's state dict from file_path."""
        self.embedding_model.load_state_dict(torch.load(file_path))
        logger.info("load embedding model from {}".format(file_path))

    def _load_upper_model(self, file_path):
        """Load only the classification head's state dict from file_path."""
        self.model.load_state_dict(torch.load(file_path))
        logger.info("load upper model from {}".format(file_path))

    def load_model(self, file_path, mode):
        """Load weights saved by :meth:`save_model` with the matching mode."""
        assert mode in ["all", "embedding", "upper"]
        if mode == "all":
            self.load_state_dict(torch.load(file_path))
            logger.info("load model from {}, mode={}".format(file_path, mode))
        elif mode == "embedding":
            # fixed: these modes previously raised NotImplementedError.
            self._load_embedding_model(file_path)
        else:
            self._load_upper_model(file_path)


class BertSoftMax(nn.Module):
    """Token-classification head: dropout + linear projection over BERT states.

    Reads ``hidden_dropout_prob`` and ``hidden_size`` from the attached
    ``model_config.bert_config`` and projects to ``num_labels`` classes.
    """

    def __init__(self, model_config):
        super(BertSoftMax, self).__init__()
        bert_cfg = model_config.bert_config
        self.num_labels = model_config.num_labels
        self.dropout = nn.Dropout(bert_cfg.hidden_dropout_prob)
        self.classifier = nn.Linear(bert_cfg.hidden_size, model_config.num_labels)

    def forward(self, embed_outputs, attention_mask, labels=None):
        """Score every token position.

        See https://huggingface.co/docs/transformers/model_doc/bert#bertmodel

        Returns ``[loss_or_None, logits, (hidden_states), (attentions)]``;
        the loss slot is filled only when ``labels`` is given.
        """
        hidden = self.dropout(embed_outputs[0])  # last_hidden_state
        logits = self.classifier(hidden)
        outputs = [None, logits, *embed_outputs[2:]]
        if labels is None:
            return outputs

        loss_fct = CrossEntropyLoss(ignore_index=0)
        flat_logits = logits.view(-1, self.num_labels)
        flat_labels = labels.view(-1)
        if attention_mask is None:
            outputs[0] = loss_fct(flat_logits, flat_labels)
        else:
            # restrict the loss to non-padded positions
            keep = attention_mask.view(-1) == 1
            outputs[0] = loss_fct(flat_logits[keep], flat_labels[keep])
        return outputs
