from scipy.optimize import linear_sum_assignment

import torch
from torch import nn
import torch.nn.functional as F

from util.box_ops import box_cxcywh_to_xyxy, generalized_box_iou
from util.misc import (nested_tensor_from_tensor_list,
                       accuracy, get_world_size, interpolate, focal_accuracy,
                       is_dist_avail_and_initialized)
import torch.distributed as dist
import copy
import numpy as np
from  .setcriterionhoi import SetCriterionHOI as setcrithoi

class SetCriterionHOI(setcrithoi):
    """HOI criterion that splits the query set into three equal groups and
    computes the full set of matching losses independently for each group
    (one-to-many / hybrid-matching variant).

    All individual loss terms (`get_loss`, `aux_outputs_loss`, ...) are
    inherited from the base ``SetCriterionHOI`` in ``setcriterionhoi``.
    """

    def __init__(self, args, matcher, weight_dict, losses, loss_type='ce_loss'):
        super(SetCriterionHOI, self).__init__(args, matcher, weight_dict, losses, loss_type)

    def forward(self, outputs, targets):
        """Compute the losses for each of the three query groups.

        Args:
            outputs: dict of prediction tensors shaped ``(batch, num_queries, ...)``,
                optionally containing 'aux_outputs' / 'enc_outputs' /
                'backbone_outputs' / 'aux_outputs_enc' entries that are handled
                by the base-class helpers.
            targets: list of per-image target dicts with keys 'obj_boxes',
                'obj_labels', 'sub_boxes', 'verb_labels'.

        Returns:
            dict mapping ``'<loss_name>_<group_idx>'`` to the corresponding
            loss value for each of the three query groups.
        """
        split = int(self.num_queries / 3)
        if self.k_one2many != 1:
            # Repeat each ground-truth annotation k_one2many times so that
            # several queries can be matched to the same annotation.
            # Work on a deep copy so the caller's targets are not mutated —
            # they may be reused afterwards (e.g. for a one-to-one loss or
            # for evaluation).
            targets = copy.deepcopy(targets)
            for target in targets:
                target["obj_boxes"] = target["obj_boxes"].repeat(self.k_one2many, 1)
                target["obj_labels"] = target["obj_labels"].repeat(self.k_one2many)
                target["sub_boxes"] = target["sub_boxes"].repeat(self.k_one2many, 1)
                target["verb_labels"] = target["verb_labels"].repeat(self.k_one2many, 1)

        final_losses = {}
        for i in range(3):
            # Slice the i-th query group out of every prediction tensor.
            outputs_group = {k: v[:, i * split:(i + 1) * split, :]
                             for k, v in outputs.items()
                             if k not in ['aux_outputs', 'enc_outputs']}
            # Retrieve the matching between this group's predictions and the
            # (possibly repeated) targets.
            indices = self.matcher(outputs_group, targets)

            # Average number of interactions across all nodes, used to
            # normalize the losses (clamped to at least 1). The device is
            # taken from the sliced dict, which is guaranteed to hold tensors
            # (the full `outputs` may start with the 'aux_outputs' list).
            num_interactions = sum(len(t['obj_labels']) for t in targets)
            num_interactions = torch.as_tensor(
                [num_interactions], dtype=torch.float,
                device=next(iter(outputs_group.values())).device)
            if is_dist_avail_and_initialized() and num_interactions.device.type != 'cpu':
                torch.distributed.all_reduce(num_interactions)
            num_interactions = torch.clamp(num_interactions / get_world_size(), min=1).item()

            losses = {}
            # Compute all the requested losses on this group's predictions.
            # NOTE(fix): the sliced `outputs_group` must be passed here —
            # `indices` were computed against the slice, so evaluating them
            # against the full `outputs` would always read the first group's
            # predictions regardless of `i`.
            for loss in self.losses:
                losses.update(self.get_loss(loss, outputs_group, targets, indices, num_interactions))

            # Auxiliary / encoder / backbone losses are delegated to the
            # base-class helpers on the full outputs dict.
            # NOTE(review): these helpers receive the same inputs on every
            # group iteration, so their values are presumably identical across
            # the three '_<i>' suffixes — confirm whether they slice
            # internally or whether this triples their effective weight.
            if 'aux_outputs' in outputs:
                losses.update(self.aux_outputs_loss(outputs, targets, num_interactions))

            if 'enc_outputs' in outputs:
                losses.update(self.enc_outputs_loss(outputs, targets, num_interactions))

            if 'backbone_outputs' in outputs:
                losses.update(self.backbone_outputs_loss(outputs, targets, num_interactions))

            if 'aux_outputs_enc' in outputs:
                losses.update(self.aux_outputs_enc_loss(outputs, targets, num_interactions))

            # Suffix every loss key with the group index.
            final_losses.update({f'{name}_{i}': value for name, value in losses.items()})
        return final_losses
