import torch.nn as nn
import torch
from torchvision.ops import box_convert, generalized_box_iou, box_iou

from scipy.optimize import linear_sum_assignment

# from backbone import Resnet50
# from hybrid_encoder import HybridEncoder
# from transformer import RTDETRTransformer
# from rtdetr import RTDETR
# from transforms import RandomFlip, Resize, ToTensor, Normalize, Compose
# from utils import collate_fn
# from dataset import CocoDetection

# from torch.utils.data import DataLoader
import torch.nn.functional as F

class HungarianMatch(nn.Module):
    """Hungarian matcher and loss head for a DETR-style detector (RT-DETR).

    For every decoder layer, predictions are matched one-to-one to the
    ground-truth boxes with the Hungarian algorithm, then three losses are
    computed on the matched pairs:

    * an L1 loss on box coordinates (``cxcywh``, assumed normalized),
    * a GIoU loss,
    * a varifocal (VFL) classification loss in which each matched query is
      supervised with the IoU of its matched pair as a soft target score.

    The returned dict keeps the historical key names
    (``l1_loss_{i}`` / ``giou_loss_{i}`` / ``focal_loss_{i}``).
    """

    def __init__(self, alpha=0.25, gamma=2, match_l1=5, match_giou=2, match_cls=2,
                 w_giou=2, w_box=5, w_focal=1):
        """
        Args:
            alpha: focal/VFL balancing factor.
            gamma: focal/VFL focusing exponent.
            match_l1: weight of the L1 term in the matching cost.
            match_giou: weight of the GIoU term in the matching cost.
            match_cls: weight of the classification term in the matching cost.
            w_giou: weight applied to the returned GIoU loss.
            w_box: weight applied to the returned L1 box loss.
            w_focal: weight applied to the returned classification (VFL) loss.
        """
        super().__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.match_giou = match_giou
        self.match_l1 = match_l1
        self.match_cls = match_cls
        self.w_giou = w_giou
        self.w_box = w_box
        self.w_focal = w_focal

    def forward(self, dec_out, targets):
        """Compute the matching losses for every decoder layer.

        Args:
            dec_out: list of per-layer dicts
                ``{'pred_box': (B, Q, 4) cxcywh, 'pred_logits': (B, Q, C)}``.
            targets: list of length ``B`` of dicts
                ``{'target_box': (num_gt, 4) cxcywh, 'label': (num_gt,)}``.

        Returns:
            dict mapping ``l1_loss_{i}``, ``giou_loss_{i}`` and
            ``focal_loss_{i}`` (one triple per decoder layer ``i``) to
            scalar loss tensors, already multiplied by their weights.
        """
        eps = 1e-8  # keeps every log() finite even for saturated sigmoids
        loss_dict = {}
        for num, layer_out in enumerate(dec_out):
            pred_box = layer_out['pred_box']        # (B, Q, 4)
            pred_logits = layer_out['pred_logits']  # (B, Q, C)
            device = pred_box.device
            batch_size = len(targets)
            # Read Q and C from the tensors instead of hard-coding 300/80,
            # so the loss works for any model configuration.
            num_queries, num_classes = pred_logits.shape[1], pred_logits.shape[2]
            pred_prob = torch.sigmoid(pred_logits)

            # ---- Hungarian assignment, independently per image ----------
            row_indices, col_indices = [], []
            for j in range(batch_size):
                row_ind, col_ind = self.match(pred_box[j],
                                              pred_prob[j],
                                              targets[j]['target_box'].to(device),
                                              targets[j]['label'].to(device))
                row_indices.append(row_ind)
                col_indices.append(col_ind)

            # ---- Gather matched pairs and their IoUs --------------------
            matched_pred, matched_target, ious = [], [], []
            for j in range(batch_size):
                p = pred_box[j, row_indices[j]]                          # (num_gt_j, 4)
                t = targets[j]['target_box'][col_indices[j]].to(device)  # (num_gt_j, 4)
                matched_pred.append(p)
                matched_target.append(t)
                # The IoU is used only as a soft classification *target*,
                # so no gradient may flow through it.
                with torch.no_grad():
                    ious.append(box_iou(
                        box_convert(p, in_fmt='cxcywh', out_fmt='xyxy'),
                        box_convert(t, in_fmt='cxcywh', out_fmt='xyxy')).diag())
            pred_box_assi = torch.cat(matched_pred, dim=0)      # (num_gt_total, 4)
            target_box_assi = torch.cat(matched_target, dim=0)  # (num_gt_total, 4)
            # Guard against a batch with no ground-truth boxes at all.
            num_gt_total = max(target_box_assi.shape[0], 1)

            # ---- L1 box loss --------------------------------------------
            l1_loss = F.l1_loss(pred_box_assi, target_box_assi,
                                reduction='sum') / num_gt_total

            # ---- GIoU loss ----------------------------------------------
            giou = generalized_box_iou(
                box_convert(pred_box_assi, in_fmt='cxcywh', out_fmt='xyxy'),
                box_convert(target_box_assi, in_fmt='cxcywh', out_fmt='xyxy'))
            giou_loss = (1 - torch.diag(giou)).sum() / num_gt_total

            # ---- Varifocal loss -----------------------------------------
            # Background label is `num_classes`; matched queries receive
            # their GT label and the matched IoU as soft score.
            labels = torch.full((batch_size, num_queries), num_classes,
                                dtype=torch.long, device=device)
            scores = torch.zeros((batch_size, num_queries), device=device)
            for j in range(batch_size):
                labels[j, row_indices[j]] = targets[j]['label'][col_indices[j]].to(device)
                scores[j, row_indices[j]] = ious[j]
            # One-hot over C+1 classes, then drop the background column.
            label_one_hot = F.one_hot(labels, num_classes + 1)[..., :-1]  # (B, Q, C)
            target_score = scores.unsqueeze(-1) * label_one_hot
            # Positives weighted by IoU score; negatives down-weighted
            # focal-style by alpha * p^gamma.
            weight = target_score + (1 - label_one_hot) * self.alpha * pred_prob ** self.gamma
            prob = pred_prob.clamp(min=eps, max=1 - eps)
            vfl_loss = -weight * (target_score * torch.log(prob)
                                  + (1 - target_score) * torch.log(1 - prob))
            # mean over queries, sum over batch & classes, rescaled by Q:
            # equivalent to total-sum / num_gt_total.
            vfl_loss = vfl_loss.mean(1).sum() * num_queries / num_gt_total

            loss_dict.update({f"l1_loss_{num}": l1_loss * self.w_box,
                              f"giou_loss_{num}": giou_loss * self.w_giou,
                              f"focal_loss_{num}": vfl_loss * self.w_focal})

        return loss_dict

    @torch.no_grad()
    def match(self, pred_box_per_img,
              pred_prob_per_img,
              target_box,
              target_label):
        """Hungarian matching for a single image.

        Args:
            pred_box_per_img: (Q, 4) predicted boxes, cxcywh.
            pred_prob_per_img: (Q, C) predicted class probabilities.
            target_box: (num_gt, 4) ground-truth boxes, cxcywh.
            target_label: (num_gt,) ground-truth class indices.

        Returns:
            ``(row_ind, col_ind)`` numpy index arrays: query ``row_ind[k]``
            is matched to ground truth ``col_ind[k]``.
        """
        eps = 1e-8  # avoid log(0) for saturated probabilities
        # Focal-style classification cost, evaluated at the GT classes only.
        out = pred_prob_per_img[:, target_label]  # (Q, num_gt)
        pos_cost_class = -self.alpha * (1 - out) ** self.gamma * torch.log(out + eps)
        neg_cost_class = -(1 - self.alpha) * out ** self.gamma * torch.log(1 - out + eps)
        cost_class = pos_cost_class - neg_cost_class
        # L1 cost between every query/GT box pair.
        cost_box = torch.cdist(pred_box_per_img, target_box, p=1)
        # Negated GIoU: higher overlap -> lower cost.
        cost_giou = -generalized_box_iou(
            box_convert(pred_box_per_img, in_fmt='cxcywh', out_fmt='xyxy'),
            box_convert(target_box, in_fmt='cxcywh', out_fmt='xyxy'))

        cost_matrix = (cost_box * self.match_l1
                       + cost_class * self.match_cls
                       + cost_giou * self.match_giou)

        return linear_sum_assignment(cost_matrix.cpu().numpy())



# if __name__ == '__main__':
#     backbone = Resnet50()
#     encoder = HybridEncoder()
#     decoder = RTDETRTransformer()
#     model = RTDETR(backbone, encoder, decoder).to('cuda')
#     transforms = Compose([Resize((640, 640)),
#                             RandomFlip(),
#                             ToTensor(),
#                             Normalize()])
#     anno_root = '/mnt/sdb2/ray/mmdetection-main/data/coco/annotations/instances_train2017.json'
#     img_root = '/mnt/sdb2/ray/mmdetection-main/data/coco/train2017/'
#     dataset = CocoDetection(anno_root=anno_root, image_root=img_root, transforms=transforms)
#     dataloader = DataLoader(dataset, batch_size=2, collate_fn=collate_fn, num_workers=1, shuffle=False)
#     loss = HungarianMatch()
#     model.train()
#     for samples, targets in dataloader:
#         samples = samples.to('cuda')
#         dec_out = model(samples)
#         loss_dict = loss(dec_out, targets)
#         print(loss_dict)



                