import torch
from modules.Layer import _model_var, pad_sequence
import numpy as np
import torch.nn.functional as F
from basic.Vocab import *


class DialogDP(object):
    """Driver that couples a pretrained LM encoder with a discourse
    dependency parser for dialogue EDUs.

    The class owns no parameters itself; it orchestrates `plm_model`
    (a HuggingFace-style encoder whose output exposes
    ``last_hidden_state``) and `parser_model` (produces arc and relation
    logits), moving batches to the right device and computing
    loss / accuracy from the cached logits.
    """

    def __init__(self, plm_model, parser_model, config):
        self.training = False
        self.config = config
        self.plm_model = plm_model
        self.parser_model = parser_model

        # Infer the device from the first trainable parser parameter so the
        # driver follows wherever the model was placed.
        self.use_cuda = next(filter(lambda p: p.requires_grad, parser_model.parameters())).is_cuda
        self.device = torch.device("cuda") if self.use_cuda else torch.device("cpu")

    def train(self):
        """Put both sub-models in training mode."""
        self.plm_model.train()
        self.parser_model.train()
        self.training = True

    def eval(self):
        """Put both sub-models in evaluation mode."""
        self.plm_model.eval()
        self.parser_model.eval()
        self.training = False

    def forward(self, plm_inputs, batch_sp, edu_lengths, arc_masks, feats):
        """Encode EDUs with the PLM and run the parser.

        Side effect: caches ``self.arc_logits`` and ``self.rel_logits``
        for the subsequent ``compute_loss`` / ``compute_accuracy`` calls.
        Returns None.

        Args:
            plm_inputs: dict of tensors shaped (batch, max_edu_num, max_tok_len)
                (e.g. input_ids / attention_mask) — assumed PLM-compatible.
            batch_sp, arc_masks, feats: parser-side tensors; moved to device.
            edu_lengths: per-dialogue EDU counts (passed through untouched).
        """
        for k, v in plm_inputs.items():
            if isinstance(v, torch.Tensor):
                plm_inputs[k] = v.to(self.device)

        batch_sp = batch_sp.to(self.device)
        arc_masks = arc_masks.to(self.device)
        feats = feats.to(self.device)

        batch_size, max_edu_num, max_tok_len = plm_inputs['input_ids'].size()
        # Flatten (batch, edu) so every EDU is encoded as an independent
        # sequence by the PLM.
        for k, v in plm_inputs.items():
            if isinstance(v, torch.Tensor):
                plm_inputs[k] = v.view(batch_size * max_edu_num, max_tok_len)
        plm_outputs = self.plm_model(**plm_inputs)
        # Take the first-token ([CLS]-position) vector as each EDU's
        # representation, then restore the (batch, edu, hidden) layout.
        token_reps = plm_outputs.last_hidden_state[:, 0, :].view(batch_size, max_edu_num, -1)

        self.arc_logits, self.rel_logits = self.parser_model(token_reps, batch_sp, edu_lengths, feats, arc_masks)
        return

    def _gather_rel_logits(self, head_index):
        """Select, for each dependent EDU, the relation logits of the head
        given by ``head_index`` (a (batch, max_edu) int tensor).

        Padding positions carry index -1, which picks the last head slot;
        those rows are ignored downstream (ignore_index=-1 in the loss,
        gold lengths in accuracy), so the arbitrary pick is harmless.
        """
        batch_size, max_edu_size, _, label_size = self.rel_logits.size()
        rel_logits = _model_var(self.parser_model, torch.zeros(batch_size, max_edu_size, label_size))
        for batch_index, (logits, arcs) in enumerate(zip(self.rel_logits, head_index)):
            picked = [logits[i][int(arcs[i])] for i in range(max_edu_size)]
            rel_logits[batch_index] = torch.stack(picked, dim=0)
        return rel_logits

    def compute_loss(self, gold_arcs, gold_rels):
        """Return the combined arc + relation cross-entropy loss.

        BUG FIX: the original computed ``rel_loss`` and then returned only
        ``arc_loss``, so the relation classifier never received gradient;
        both terms are now summed.

        Args:
            gold_arcs: per-dialogue gold head indices (ragged; padded to -1).
            gold_rels: per-dialogue gold relation labels (ragged; padded to -1).
        """
        batch_size, max_edu_size, _ = self.arc_logits.size()
        gold_arcs = _model_var(self.parser_model, pad_sequence(gold_arcs,
                                                          length=max_edu_size, padding=-1, dtype=np.int64))

        # Padded positions (-1) are excluded via ignore_index.
        arc_loss = F.cross_entropy(self.arc_logits.view(batch_size * max_edu_size, -1),
                                   gold_arcs.view(-1), ignore_index=-1)

        # Relation loss is computed against the logits of the *gold* head
        # (teacher forcing), not the predicted one.
        rel_logits = self._gather_rel_logits(gold_arcs)

        gold_rels = _model_var(self.parser_model, pad_sequence(gold_rels,
                                                          length=max_edu_size, padding=-1, dtype=np.int64))

        rel_loss = F.cross_entropy(rel_logits.view(batch_size * max_edu_size, -1),
                                   gold_rels.view(-1), ignore_index=-1)

        return arc_loss + rel_loss

    def compute_accuracy(self, gold_arcs, gold_rels):
        """Count correct arc and relation predictions for the cached logits.

        Position 0 (the dummy root EDU) is skipped in both counts.
        Relation accuracy is conditioned on the gold head (same teacher
        forcing as the loss), so ``rel_correct`` can exceed what a fully
        predicted parse would score.

        Returns:
            (arc_correct, arc_total, rel_correct) as plain ints.
        """
        arc_correct, arc_total, rel_correct = 0, 0, 0
        pred_arcs = self.arc_logits.detach().max(2)[1].cpu().numpy()
        assert len(pred_arcs) == len(gold_arcs)

        for p_arcs, g_arcs in zip(pred_arcs, gold_arcs):
            for idx in range(1, len(g_arcs)):
                if p_arcs[idx] == g_arcs[idx]:
                    arc_correct += 1
                arc_total += 1

        _, max_edu_size, _, _ = self.rel_logits.size()

        gold_arcs_index = _model_var(self.parser_model, pad_sequence(gold_arcs,
                                                                length=max_edu_size,
                                                                padding=-1, dtype=np.int64))
        rel_logits = self._gather_rel_logits(gold_arcs_index)
        pred_rels = rel_logits.detach().max(2)[1].cpu().numpy()

        assert len(pred_rels) == len(gold_rels)
        for p_rels, g_rels in zip(pred_rels, gold_rels):
            for idx in range(1, len(g_rels)):
                if p_rels[idx] == g_rels[idx]:
                    rel_correct += 1

        return arc_correct, arc_total, rel_correct
