import torch
import torch.nn as nn
import torch.nn.functional as F
from scipy.optimize import linear_sum_assignment
from tokenizer import PAD_ID, MASK, MASK_ID


class SequenceLoss(nn.Module):
    """Token-level cross-entropy over batched output sequences.

    Flattens (batch, seq_len, vocab) logits and (batch, seq_len) targets,
    remaps every secondary ignore index onto the primary one, and returns
    the mean cross-entropy over the non-ignored positions.
    """

    def __init__(self, label_smoothing, vocab_size, ignore_index=-100, ignore_indices=None):
        """
        Args:
            label_smoothing: epsilon for label smoothing in
                ``nn.CrossEntropyLoss`` (0.0 disables smoothing).
            vocab_size: size of the output vocabulary. Kept for interface
                compatibility; ``nn.CrossEntropyLoss`` infers it from logits.
            ignore_index: target value excluded from the loss.
            ignore_indices: optional iterable of additional target values to
                exclude. When non-empty, its first element becomes the
                primary ignore index (matches original behavior).
        """
        super().__init__()

        # Avoid the mutable-default-argument pitfall; copy so a caller's
        # list cannot be mutated or shared between instances.
        ignore_indices = list(ignore_indices) if ignore_indices else []
        if ignore_indices:
            ignore_index = ignore_indices[0]

        self.ignore_index = ignore_index
        self.ignore_indices = ignore_indices

        # Bug fix: label_smoothing was previously accepted but never used.
        self.criterion = nn.CrossEntropyLoss(ignore_index=ignore_index, reduction='mean',
                                             label_smoothing=label_smoothing)

    def forward(self, output, target):
        """Compute the mean cross-entropy loss.

        Args:
            output: float tensor of shape (batch, seq_len, vocab_size).
            target: integer tensor of shape (batch, seq_len).

        Returns:
            Scalar loss tensor.
        """
        vocab_size = output.size(-1)
        output = output.reshape(-1, vocab_size)
        target = target.reshape(-1)

        # Remap secondary ignore ids onto the primary one. Done out-of-place:
        # reshape(-1) may alias the caller's tensor, and the previous
        # masked_fill_ silently mutated it.
        for idx in self.ignore_indices:
            if idx != self.ignore_index:
                target = target.masked_fill(target == idx, self.ignore_index)

        return self.criterion(output, target)


class GraphLoss(nn.Module):
    """Losses for graph outputs: node coordinates (L1) and edge types (CE).

    Edge classification uses a weighted cross-entropy that down-weights
    class 0 relative to all other classes — presumably class 0 is the
    dominant "no edge" label; TODO confirm with the data pipeline.
    """

    def __init__(self, num_edge_classes=7, edge_weight=10.0):
        """
        Args:
            num_edge_classes: number of edge-type classes. Default 7 matches
                the previously hard-coded value.
            edge_weight: class weight for every class except class 0
                (class 0 keeps weight 1). Default 10 matches the original.
        """
        super().__init__()
        weight = torch.full((num_edge_classes,), float(edge_weight))
        weight[0] = 1.0
        self.criterion = nn.CrossEntropyLoss(weight, ignore_index=-100)

    def forward(self, outputs, targets):
        """Compute each sub-loss present in ``outputs``.

        Args:
            outputs: dict optionally containing
                'coords': float tensor (batch, num_nodes, coord_dim) and/or
                'edges': float logits (batch, num_edge_classes, n, n).
            targets: dict with matching keys; coordinate targets use
                negative values to mark padded positions — TODO confirm
                with the collate function.

        Returns:
            Dict with a scalar loss tensor per present key.
        """
        results = {}

        if 'coords' in outputs:
            pred = outputs['coords']
            max_len = pred.size(1)
            target = targets['coords'][:, :max_len]
            # Negative coordinates are treated as padding and excluded.
            mask = target.ge(0)
            loss = F.l1_loss(pred, target, reduction='none')
            # clamp guards a fully-masked batch (avoids 0/0 -> NaN)
            results['coords'] = (loss * mask).sum() / mask.sum().clamp(min=1)

        if 'edges' in outputs:
            pred = outputs['edges']
            max_len = pred.size(-1)
            # Truncate target adjacency to the predicted size.
            target = targets['edges'][:, :max_len, :max_len]
            results['edges'] = self.criterion(pred, target)

        return results


class Criterion(nn.Module):
    """Dispatches each output format to its dedicated loss module.

    Builds a ``GraphLoss`` for the 'edges' format and a ``SequenceLoss``
    for every other format listed in ``args.formats``.
    """

    def __init__(self, args, tokenizer):
        """
        Args:
            args: namespace providing ``formats`` (iterable of format names)
                and ``label_smoothing``.
            tokenizer: mapping from format name to a tokenizer exposing
                ``stoi`` (token -> id dict) and ``__len__`` (vocab size).
        """
        super().__init__()
        criterion = {}
        for format_ in args.formats:
            if format_ == 'edges':
                criterion['edges'] = GraphLoss()
            else:
                # Only tokenizers that define a MASK token get MASK_ID
                # excluded from the loss alongside padding.
                if MASK in tokenizer[format_].stoi:
                    ignore_indices = [PAD_ID, MASK_ID]
                else:
                    ignore_indices = []
                criterion[format_] = SequenceLoss(args.label_smoothing, len(tokenizer[format_]),
                                                  ignore_index=PAD_ID, ignore_indices=ignore_indices)
        # ModuleDict registers each loss (and its buffers/weights) properly.
        self.criterion = nn.ModuleDict(criterion)

    def forward(self, results, refs):
        """Compute one scalar loss per output format.

        Args:
            results: mapping from format name to a tuple whose first two
                items are (predictions, targets); any extra items are
                ignored.
            refs: unused here; kept for interface compatibility with callers.

        Returns:
            Dict mapping loss name to a scalar loss tensor. A dict-valued
            sub-loss (GraphLoss returns {'coords': ..., 'edges': ...}) is
            flattened into the result.
        """
        losses = {}
        for format_ in results:
            predictions, targets, *_ = results[format_]
            loss_ = self.criterion[format_](predictions, targets)
            if isinstance(loss_, dict):
                losses.update(loss_)
            else:
                # Reduce a non-scalar loss to its mean — presumably a
                # per-device vector from a parallel wrapper; TODO confirm.
                if loss_.numel() > 1:
                    loss_ = loss_.mean()
                losses[format_] = loss_
        return losses
