import torch
import torch.nn.functional as F


class DisParser(object):
    """Discourse parser wrapper pairing a global encoder with a DC decoder.

    Not an ``nn.Module`` itself: it holds the two sub-modules, mirrors their
    train/eval mode, and caches the decoder logits from :meth:`forward` on
    ``self.logits`` for the subsequent loss/accuracy calls.
    """

    def __init__(self, global_encoder, dc_decoder, config):
        self.config = config
        self.global_encoder = global_encoder
        self.dc_decoder = dc_decoder

        # Infer the target device from the first trainable decoder parameter,
        # so inputs moved in forward() land where the model weights live.
        self.device = torch.device("cpu")
        self.use_cuda = next(filter(lambda p: p.requires_grad, dc_decoder.parameters())).is_cuda
        if self.use_cuda:
            self.device = torch.device("cuda")

    def train(self):
        """Put both sub-modules in training mode and record the flag."""
        self.global_encoder.train()
        self.dc_decoder.train()
        self.training = True

    def eval(self):
        """Put both sub-modules in evaluation mode and record the flag."""
        self.global_encoder.eval()
        self.dc_decoder.eval()
        self.training = False

    def forward(self, doc_inputs, edu_offset, b_win):
        """Encode a document batch and cache decoder logits on ``self.logits``.

        Args:
            doc_inputs: dict of input tensors; every tensor value is moved
                to ``self.device`` in place (non-tensor values are left as-is).
            edu_offset: tensor of EDU offsets, moved to ``self.device``.
            b_win: window argument forwarded to the encoder
                (presumably a boundary window size — confirm against encoder).

        Returns:
            The decoder logits (also stored on ``self.logits`` for
            compute_loss / compute_accuracy).
        """
        for key, value in doc_inputs.items():
            if isinstance(value, torch.Tensor):
                doc_inputs[key] = value.to(self.device)
        edu_offset = edu_offset.to(self.device)

        token_reps = self.global_encoder(doc_inputs, edu_offset, b_win)
        self.logits = self.dc_decoder(token_reps)
        return self.logits

    def compute_accuracy(self, gold_labels):
        """Count correct argmax predictions against gold labels.

        Positions labeled ``-1`` (padding) are skipped, matching the
        ``ignore_index`` used in :meth:`compute_loss`.

        Args:
            gold_labels: integer label tensor (any device); flattened and
                compared element-wise against the argmax of ``self.logits``.

        Returns:
            Tuple ``(total_num, correct)`` of non-padding positions and hits.
        """
        total_num = 0
        correct = 0
        # .detach().cpu() before .numpy(): gold labels may live on the GPU
        # and numpy() only accepts CPU tensors without autograd history.
        g_labels = gold_labels.detach().cpu().numpy().flatten()
        p_labels = self.logits.detach().max(-1)[1].cpu().numpy().flatten()
        for g, p in zip(g_labels, p_labels):
            if g == -1:  # padding position: excluded from the denominator too
                continue
            if g == p:
                correct += 1
            total_num += 1
        return total_num, correct

    def compute_loss(self, true_acs):
        """Cross-entropy loss over the cached logits.

        Args:
            true_acs: gold action-label tensor of shape
                ``(batch_size, action_len)``; ``-1`` marks padding.

        Returns:
            Scalar loss tensor (mean over non-ignored positions).
        """
        # .to(self.device) instead of .cuda(): consistent with forward(),
        # a no-op on CPU, and respects the inferred device on GPU.
        true_acs = true_acs.to(self.device)
        batch_size, action_len, action_num = self.logits.size()
        arc_loss = F.cross_entropy(
            self.logits.view(batch_size * action_len, action_num), true_acs.view(batch_size * action_len),
            ignore_index=-1)
        return arc_loss