# Depth Loss Head for stereo matching supervision.


import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist


class DepthLossHead(nn.Module):
    """Depth supervision head for stereo matching.

    During training, ``get_loss`` compares predicted depth maps and depth
    cost volumes against a dense ground-truth depth image using one or more
    configured loss terms (smooth-L1, pure L1, soft cross-entropy over depth
    bins, gaussian / laplacian soft targets, hard CE).  During evaluation,
    ``forward`` appends per-image and per-box depth-error statistics to
    ``batch_dict``.
    """

    def __init__(self, model_cfg, point_cloud_range):
        super().__init__()
        self.model_cfg = model_cfg
        # Mapping of loss-type name -> scalar weight, e.g. {'ce': 1.0}.
        self.depth_loss_type = model_cfg.LOSS_TYPE
        # Per-prediction-stage weights; must match len(batch_dict['depth_preds']).
        self.loss_weights = model_cfg.WEIGHTS
        self.point_cloud_range = point_cloud_range
        # NOTE(review): depth bounds taken from range[0] and range[3] — i.e.
        # the x-axis extent of the point cloud; assumes depth is measured
        # along camera-forward x. TODO confirm against the dataset config.
        self.min_depth = point_cloud_range[0]
        self.max_depth = point_cloud_range[3]
        self.forward_ret_dict = {}
        self.downsample_disp = model_cfg.get('downsample_disp', 4)
        # Optional focal-style edge-aware term added on top of the main loss.
        self.edge_aware_loss = None
        # When True, skip the (slow) evaluation statistics in forward().
        self.inference_speed = model_cfg.get('inference_speed', False)
        if self.model_cfg.get('EDGE_AWARE', False):
            from mmdet3d.models.builder import build_loss
            self.edge_aware_loss = build_loss(dict(type='FocalLoss'))

    def get_loss(self, batch_dict, tb_dict=None):
        """Compute the weighted depth supervision loss.

        Args:
            batch_dict: must contain 'depth_preds' (list of predicted depth
                maps, one per stage), 'depth_volumes' (list of cost volumes
                over depth bins), 'depth_samples' (1D tensor of depth-bin
                centers) and 'depth_gt_img' (dense GT depth image).
            tb_dict: optional dict receiving per-stage/per-term scalar logs.

        Returns:
            tuple: (total scalar depth loss, tb_dict with logged scalars).
        """
        if tb_dict is None:
            tb_dict = {}

        depth_preds = batch_dict['depth_preds'] #[(1,320,1248)]
        depth_volumes = batch_dict['depth_volumes']
        depth_sample = batch_dict['depth_samples']
        gt = batch_dict['depth_gt_img'].squeeze(1) #([1, 480, 928])
        if self.edge_aware_loss:
            # Build a depth-edge map of the GT: local max over a step x step
            # window (via unfold), then compare against four axis-aligned
            # shifted copies; the max absolute difference across the shifts
            # approximates the local depth gradient magnitude.
            step = 7
            B, H, W = gt.size()
            depth_tmp = gt.reshape(B, H, W)
            pad = int((step - 1) // 2)
            depth_tmp = F.pad(depth_tmp, [pad, pad, pad, pad], mode='constant', value=0)
            patches = depth_tmp.unfold(dimension=1, size=step, step=1) # Height-wise
            patches = patches.unfold(dimension=2, size=step, step=1) # Width-wise
            max_depth, _ = patches.reshape(B, H, W, -1).max(dim=-1)  # ([1, 480, 928])
            # save_normalized_tensor_as_image(max_depth, 'dense_depth.png')

            step = float(step)
            # Normalized translations (affine-grid units): down, up, right, left.
            shift_list = [[step / H, 0.0 / W], [-step / H, 0.0 / W], [0.0 / H, step / W], [0.0 / H, -step / W]]
            max_depth_tmp = max_depth.reshape(B, 1, H, W)
            output_list = []
            for shift in shift_list:
                transform_matrix =torch.tensor([[1, 0, shift[0]],[0, 1, shift[1]]]).unsqueeze(0).repeat(B, 1, 1).cuda()
                grid = F.affine_grid(transform_matrix, max_depth_tmp.shape, align_corners=True).float()
                output = F.grid_sample(max_depth_tmp, grid, mode='nearest', align_corners=True).reshape(B, 1, H, W)  # shifted image
                output = max_depth - output
                # Zero out positions where the difference equals max_depth,
                # i.e. the shifted sample came from the zero border padding.
                output_mask = ((output == max_depth) == False)
                output = output * output_mask
                output_list.append(output)
            grad = torch.cat(output_list, dim=1)  # (B, 4, H, W): one channel per shift direction
            # NOTE(review): after abs() the values are non-negative, so the
            # lower clamp bound -30 is a no-op; only the upper bound matters.
            max_grad = torch.abs(grad).max(dim=1)[0].clamp(-30,30)
            # save_normalized_tensor_as_image(max_grad, 'depth_max_grad.png')
        # Crop predictions to the GT image height (predictions may be padded).
        height = gt.shape[1]
        depth_preds = [d[:, :height] for d in depth_preds]
        depth_volumes = [d[:, :, :height] for d in depth_volumes]

        depth_loss = 0.
        assert len(depth_preds) == len(depth_volumes)
        assert len(depth_preds) == len(self.loss_weights)
        # Supervise only pixels whose GT depth lies strictly inside the range.
        mask = (gt > self.min_depth) & (gt < self.max_depth) #(1,320,1248)
        gt = gt[mask] #(15809)
        if self.edge_aware_loss:
            # Per-valid-pixel weight in [0, 1] proportional to edge strength.
            # NOTE(review): computed but never applied below — the edge loss
            # uses a focal weight instead. TODO confirm this is intentional.
            edge_weight = (torch.abs(max_grad) / torch.max(torch.abs(max_grad)))[mask]
        # Spacing between consecutive depth bins (assumes uniform sampling).
        depth_interval = depth_sample[1] - depth_sample[0]



        assert not self.model_cfg.get('DIST_WEIGHT', False) or list(self.depth_loss_type)[0] in ['ce', 'gaussian'], 'invalid distanced-based weighted loss'

        for i, (depth_pred, depth_cost, pred_weight) in enumerate(zip(depth_preds, depth_volumes, self.loss_weights)):

            # NOTE(review): this upsampling path looks suspect — trilinear
            # interpolation needs a 5D input, and self.max_depth is used as
            # an output size. Possibly dead code for current configs; verify.
            if depth_pred.shape[-2:] != mask.shape[-2:]:
                depth_pred = F.interpolate(depth_pred, [self.max_depth, *mask.shape[-2:]],
                mode='trilinear',
                align_corners=True)
            # Gather valid pixels: depth_pred -> (N,), depth_cost -> (N, D).
            depth_pred = depth_pred[mask]
            depth_cost = depth_cost.permute(0, 2, 3, 1)[mask]

            for loss_type, loss_type_weight in self.depth_loss_type.items():
                if depth_pred.shape[0] == 0:
                    # No valid GT pixels: emit a zero loss that still keeps
                    # the computation graph connected to the predictions.
                    print('no gt warning')
                    loss = depth_preds[i].mean() * 0.0
                else:
                    if loss_type == "l1":
                        loss = F.smooth_l1_loss(depth_pred, gt, reduction='none')
                        loss = loss.mean()
                    elif loss_type == "purel1":
                        loss = F.l1_loss(depth_pred, gt, reduction='none')
                        loss = loss.mean()
                    elif loss_type == "ce":
                        # Soft cross-entropy: target mass decays linearly with
                        # the bin's distance from the GT depth (zero beyond
                        # one bin interval).
                        depth_log_prob = F.log_softmax(depth_cost, dim=1)
                        distance = torch.abs(
                            depth_sample.cuda() - gt.unsqueeze(-1)) / depth_interval
                        probability = 1 - distance.clamp(max=1.0)
                        loss = -(probability * depth_log_prob).sum(-1)

                        if self.model_cfg.get('DIST_WEIGHT', False):
                            # Weight pixels by depth**power, emphasizing
                            # far-away (harder) points.
                            dist_weight_power = self.model_cfg.get('DIST_WEIGHT_POWER', 1.)
                            gt_weight = gt ** dist_weight_power
                            loss = (loss * gt_weight).sum() / gt_weight.sum()
                        else:
                            loss = loss.mean()
                    elif loss_type.startswith("gaussian"):
                        # Gaussian soft target around the GT depth; sigma is
                        # encoded in the loss name, e.g. "gaussian_1.0".
                        depth_log_prob = F.log_softmax(depth_cost, dim=1)
                        distance = torch.abs(
                            depth_sample.cuda() - gt.unsqueeze(-1))
                        sigma = float(loss_type.split("_")[1])
                        # NOTE(review): requires torch.distributed to be
                        # initialized, and prints on every call.
                        if dist.get_rank() == 0:
                            print("depth loss using gaussian normalized", sigma)
                        probability = torch.exp(-0.5 * (distance ** 2) / (sigma ** 2))
                        probability /= torch.clamp(probability.sum(1, keepdim=True), min=1.0)
                        loss = -(probability * depth_log_prob).sum(-1)
                        if self.model_cfg.get('DIST_WEIGHT', False):
                            dist_weight_power = self.model_cfg.get('DIST_WEIGHT_POWER', 1.)
                            gt_weight = gt ** dist_weight_power
                            loss = (loss * gt_weight).sum() / gt_weight.sum()
                        else:
                            loss = loss.mean()
                        # NOTE(review): redundant — loss is already a scalar
                        # at this point, so this mean() is a no-op.
                        loss = loss.mean()
                    elif loss_type.startswith("laplacian"):
                        # Laplacian soft target; sigma encoded as above.
                        depth_log_prob = F.log_softmax(depth_cost, dim=1)
                        distance = torch.abs(
                            depth_sample.cuda() - gt.unsqueeze(-1))
                        sigma = float(loss_type.split("_")[1])
                        if dist.get_rank() == 0:
                            print("depth loss using laplacian normalized", sigma)
                        probability = torch.exp(-distance / sigma)
                        probability /= torch.clamp(probability.sum(1, keepdim=True), min=1.0)
                        loss = -(probability * depth_log_prob).sum(-1)
                        loss = loss.mean()
                    elif loss_type == "hard_ce":
                        # Hard variant: binarize the linear soft target at 0.5,
                        # i.e. supervise only bins within half an interval.
                        depth_log_prob = F.log_softmax(depth_cost, dim=1)
                        distance = torch.abs(
                            depth_sample.cuda() - gt.unsqueeze(-1)) / depth_interval
                        probability = 1 - distance.clamp(max=1.0)
                        probability[probability >= 0.5] = 1.0
                        probability[probability < 0.5] = .0

                        loss = -(probability * depth_log_prob).sum(-1)

                        loss = loss.mean()
                    else:
                        raise NotImplementedError

                tb_dict['loss_depth_{}_{}'.format(i, loss_type)] = loss.item()
                depth_loss += pred_weight * loss_type_weight * loss

            if self.edge_aware_loss:
                # Focal-style loss of the edge-aware depth supervision:
                # soft-CE target modulated by (1 - p)^gamma, down-weighting
                # bins the model already predicts confidently.
                gamma=2.0
                depth_prob = F.softmax(depth_cost, dim=1)
                distance = torch.abs(
                    depth_sample.cuda() - gt.unsqueeze(-1)) / depth_interval
                target = 1 - distance.clamp(max=1.0)
                # pt = (1 - depth_prob) * target + depth_prob * (1 - target) #TODO
                edge_focal_loss = -(target * F.log_softmax(depth_cost, dim=1))
                focal_weight = (1 - depth_prob)**gamma

                edge_focal_loss = (focal_weight*edge_focal_loss).sum(-1)
                edge_focal_loss = edge_focal_loss.mean()

                depth_loss +=  edge_focal_loss
        return depth_loss, tb_dict

    def forward(self, batch_dict):
        """At evaluation time, append depth-error statistics to ``batch_dict``.

        A pure passthrough during training or when ``inference_speed`` is set.
        Adds per-image median absolute error, error rates at fixed thresholds
        (0.2/0.4/0.8/1.6 m), and — when a foreground mask is available —
        per-box error statistics.
        """
        # if batch_dict['depth_preds'][-1].shape[0] != 1:
        #     raise NotImplementedError
        if not self.training and not self.inference_speed:
            # return batch_dict
            # depth_pred = batch_dict['depth_preds'][-1]
            depth_pred_locals = batch_dict['depth_preds_local'][-1]
            # depth_cost = batch_dict['depth_volumes'][0].permute(0, 2, 3, 1)
            # depth_sample = batch_dict['depth_samples']

            N = depth_pred_locals.shape[0]

            # batch_dict['depth_error_map'] = []
            batch_dict['depth_error_all_local_median'] = []
            for thresh in [0.2, 0.4, 0.8, 1.6]:
                batch_dict[f"depth_error_all_local_{thresh:.1f}m"] = []
            batch_dict['depth_error_fg_local_statistics_perbox'] = []

            for b in range(N):
                #TODO(hack)
                # Crop prediction width to the GT image width before comparing.
                depth_pred_local = depth_pred_locals[..., :batch_dict['depth_gt_img'].shape[-1]][b:b+1]
                gt = batch_dict['depth_gt_img'].squeeze(1)[b:b+1]
                depth_fgmask_img = batch_dict['depth_fgmask_img'].squeeze(1)[b:b+1]

                # Valid-depth mask, same criterion as in get_loss().
                mask = (gt > self.min_depth) & (gt < self.max_depth)
                # depth_interval = depth_sample[1] - depth_sample[0]
                assert mask.sum() > 0

                # abs error
                error_map = torch.abs(depth_pred_local - gt) * mask.float()
                # batch_dict['depth_error_map'].append(error_map)

                # mean_error = error_map[mask].mean()
                median_error = error_map[mask].median()

                # batch_dict['depth_error_local_mean'] = mean_error
                batch_dict['depth_error_all_local_median'].append( median_error )
                # Fraction of valid pixels whose error exceeds each threshold.
                for thresh in [0.2, 0.4, 0.8, 1.6]:
                    batch_dict[f"depth_error_all_local_{thresh:.1f}m"].append( (error_map[mask] > thresh).float().mean() )

                if 'depth_fgmask_img' in batch_dict:
                    # Foreground pixels: valid depth AND belonging to some box.
                    fg_mask = (gt > self.min_depth) & (gt < self.max_depth) & (depth_fgmask_img > 0)
                    local_errs = torch.abs(depth_pred_local - gt)
                    fg_local_errs = local_errs[fg_mask]

                    # fg local depth errors per instance
                    fg_gts = gt[fg_mask]
                    batch_dict['depth_error_fg_local_statistics_perbox'].append( [] )
                    # Mask values store (box index + 1); 0 marks background.
                    fg_ids = depth_fgmask_img[fg_mask].int() - 1
                    if len(fg_ids) > 0:
                        for idx in range(fg_ids.min().item(), fg_ids.max().item() + 1):
                            # Skip boxes without a matched GT annotation.
                            if batch_dict['gt_index'][b][idx] < 0:
                                continue
                            # Skip boxes with too few covered pixels (<= 5).
                            if torch.sum(fg_ids == idx) <= 5:
                                continue
                            errs_i = fg_local_errs[fg_ids == idx]
                            fg_gt_i_median = fg_gts[fg_ids == idx].median().item()
                            num_points_i = (fg_ids == idx).sum().item()
                            batch_dict['depth_error_fg_local_statistics_perbox'][-1].append(dict(
                                distance=fg_gt_i_median,
                                err_median=errs_i.median().item(),
                                num_points=num_points_i,
                                name=batch_dict['gt_names'][b][idx],
                                truncated=batch_dict['gt_truncated'][b][idx],
                                occluded=batch_dict['gt_occluded'][b][idx],
                                difficulty=batch_dict['gt_difficulty'][b][idx],
                                index=batch_dict['gt_index'][b][idx],
                                idx=idx,
                                image_idx=batch_dict['image_idx'][b]
                            ))

                            for thresh in [0.2, 0.4, 0.8, 1.6]:
                                batch_dict['depth_error_fg_local_statistics_perbox'][-1][-1][f"err_{thresh:.1f}m"] = (errs_i > thresh).float().mean().item()

        return batch_dict

def save_normalized_tensor_as_image(tensor, output_path):
    """Min-max normalize a [1, H, W] tensor and save it as an 8-bit image.

    Args:
        tensor (torch.Tensor or numpy.ndarray): Input of shape [1, H, W].
        output_path (str): Output path for saving the image.

    Returns:
        None
    """
    import numpy as np
    from PIL import Image

    # Convert any torch tensor (CPU or CUDA, with or without grad) to numpy.
    # The original only handled the CUDA case, so a CPU torch tensor crashed
    # later on .astype(), and a grad-tracking tensor raised on .numpy().
    if torch.is_tensor(tensor):
        tensor = tensor.detach().cpu().numpy()

    # Normalize to [0, 255], guarding against a constant image (zero range
    # would otherwise divide by zero and yield NaNs).
    t_min = np.min(tensor)
    t_range = np.max(tensor) - t_min
    if t_range == 0:
        tensor_normalized = np.zeros_like(tensor, dtype=np.float64)
    else:
        tensor_normalized = (tensor - t_min) / t_range * 255

    # Convert the normalized tensor to a uint8 NumPy array.
    tensor_uint8 = tensor_normalized.astype(np.uint8)

    # Drop the leading channel dimension -> [H, W] single-channel image.
    tensor_2d = tensor_uint8[0]

    # Create a PIL Image from the NumPy array and save it.
    image = Image.fromarray(tensor_2d)
    image.save(output_path)