# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

import os
from typing import Optional

import numpy as np
import tinycudann as tcnn
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch.nn import SmoothL1Loss
from torch_efficient_distloss import (eff_distloss, eff_distloss_native,
                                      flatten_eff_distloss)
import jax
import jax.numpy as jnp


def tensor2number(x):
    """Convert a 0-dim torch tensor to a Python number; pass other values through."""
    return x.item() if isinstance(x, torch.Tensor) else x

def empty_criterion(x):
    """Mean squared magnitude; pushes densities in empty space toward zero."""
    return (x ** 2).mean()

def img2mse(x, y):
    """Mean squared error between two images/tensors."""
    return torch.mean((x - y) ** 2)

def img2mae(x, y):
    """Smoothed mean absolute error (Charbonnier-style; eps=1e-4 keeps the gradient finite at 0)."""
    return torch.sqrt((x - y).pow(2) + 1e-4).mean()  # ~ torch.mean(torch.abs(x - y) + 1e-4)

def mse2psnr(x):
    """Convert an MSE value (Python/numpy scalar) to PSNR in dB."""
    return -10. * np.log10(x)

def get_loss_weights(opt, global_step=0):
    """Collect per-term loss weights from opt.

    global_step is accepted so a schedule can later adjust weights per epoch.
    'rgb' and 'depth' are always present; every other term is included only
    when its option exists and is positive.
    """
    weights = {'rgb': opt.rgb_r, 'depth': opt.depth_r}

    # (loss name, option attribute) pairs, in the original insertion order.
    optional_terms = [
        ('eikonal', 'eikonal_r'),
        ('sdf', 'sdf_r'),
        ('front', 'front_r'),
        ('empty', 'empty_r'),
        ('reg', 'reg_r'),
        ('mip', 'mip_r'),
        ('info', 'info_r'),
        ('tv', 'tv_r'),
        ('truncate', 'truncate_r'),
        ('wentropy', 'wentropy_r'),
        ('region_depth', 'region_depth_r'),
        ('varw', 'varw_r'),
        ('varc', 'varc_r'),
        ('ssim', 'ssim_r'),
    ]
    for name, attr in optional_terms:
        if attr in vars(opt) and getattr(opt, attr) > 0:
            if name == 'region_depth':
                # region depth loss replaces the plain depth loss
                assert weights['depth'] <= 0, "region depth loss and depth loss is only valid one"
            weights[name] = getattr(opt, attr)

    return weights


class Loss_Logger:
    """Accumulates per-term loss values for averaged tensorboard reporting."""

    def __init__(self, loss_weights):
        # One accumulator per loss term, plus 'all' for the weighted total.
        self.avg_logger = {'all': 0}
        self.avg_logger.update(dict.fromkeys(loss_weights, 0))

    def reset(self):
        """Zero every accumulator."""
        self.avg_logger = dict.fromkeys(self.avg_logger, 0)

    def log(self, loss_dict):
        """Accumulate each entry of loss_dict (tensors are converted to numbers)."""
        for name, value in loss_dict.items():
            self.avg_logger[name] += tensor2number(value)

    def log_total_loss(self, total_loss):
        self.avg_logger['all'] += total_loss

    def get_total_loss(self):
        return self.avg_logger['all']

    def report_in_tensorboard(self, writer, local_step, global_step):
        """Write the running averages (accumulated / local_step) to tensorboard."""
        for name, total in self.avg_logger.items():
            writer.add_scalar(name, total / local_step, global_step)


class Base_Loss(nn.Module):
    """Abstract base class for loss modules; subclasses implement the hooks below."""

    def __init__(self):
        super().__init__()

    def get_loss_from_type(self, loss_type):
        """Map a loss-type string to a loss callable; must be provided by a subclass."""
        raise NotImplementedError()

    def forward(self, x, y):
        """Compute the loss between prediction x and target y; must be provided by a subclass."""
        raise NotImplementedError()


class Free_nerf_loss(Base_Loss):
    '''
        This class is used to calculate the loss of the nerf model.
        forward() returns the per-term losses, their weighted sum, and the
        current PSNR of the rendered rgb against the ground truth.
    '''
    def __init__(self, opt):
        super().__init__()
        self.opt = opt
        if self.opt.decay_iter > 0: # if a decay iteration is set
            self.decay_iter = self.opt.decay_iter
        elif self.opt.decay_iter == 0: # if decay iteration is explicitly set to 0
            self.decay_iter = int(self.opt.iters // 2) # set decay iteration to half of the total number of iterations
        else: # if no decay iteration is set
            self.decay_iter = 1e10 # set decay iteration to a very large number

    def forward(self, outputs, gt, global_step=0):
        """Compute every loss term enabled by get_loss_weights.

        Args:
            outputs: render results; reads 'rgb', 'depth', 'z_vals', 'weights',
                'raw_rgb' and, per enabled term, 'rgb0', 'sigma', 'acc',
                'alpha', 'grad_theta'.
            gt: ground-truth dict with 'images', 'depths' and optionally
                'confidence'.
            global_step: current iteration, drives the decay schedules.

        Returns:
            (losses dict, weighted total loss, current PSNR as a float).
        """
        loss_weights = get_loss_weights(self.opt, global_step)
        losses = {}

        pred_rgb = outputs['rgb']
        pred_depth = outputs['depth'].unsqueeze(-1)
        pred_rgb0 = outputs.get('rgb0', None) # coarse-stage rgb, if present
        z_vals = outputs['z_vals']
        w = outputs['weights']
        rgbs = outputs['raw_rgb']

        # Per-ray confidence is only used when view_ratio is enabled.
        if "confidence" in gt.keys() and self.opt.view_ratio > 0:
            confidence = gt['confidence'].clone() # [N, 1]
            if confidence.shape[0] < pred_rgb.shape[0]:
                print("condfidence shape is wrong!!!")
        else:
            confidence = None

        # After decay_iter, restrict confidence-driven terms to high-confidence rays.
        if confidence is not None and global_step >= self.decay_iter:
            aug_mask = confidence > 1
            if global_step == self.decay_iter:
                print("decay iter start!!!")
        else:
            aug_mask = None

        gt_rgb, gt_depth = gt['images'], gt['depths'] # [N,3]; [N,1]
        cur_psnr = mse2psnr(img2mse(pred_rgb, gt_rgb).item())

        for loss_type in loss_weights:
            if loss_type == 'rgb':
                losses[loss_type] = compute_loss(pred_rgb, gt_rgb,loss_type=get_loss_type(self.opt))
                if pred_rgb0 is not None:
                    # also supervise the coarse-stage rgb
                    losses[loss_type] += compute_loss(pred_rgb0, gt_rgb,loss_type=get_loss_type(self.opt))

            elif loss_type == 'depth':
                # losses['depth'] = self.get_depth_loss(pred_depth, gt_depth, 'l1', confidence, global_step, robust=True)
                # a new method for decay and confidence:
                # 1. if gt_depth is all zeros, return zeros
                # 2. if global_step > decay_iter, disable loss on confidence <= 1
                # 3. if global_step <= decay_iter, loss is calculated by confidence
                # NOTE(review): this branch reads losses['rgb'], so it relies on
                # 'rgb' being inserted before 'depth' in loss_weights.

                valid_mask = gt_depth > 0
                masked_pred_depth = pred_depth[valid_mask]
                masked_gt_depth = gt_depth[valid_mask]
                masked_confidence = confidence[valid_mask] if confidence is not None else None

                # check if gt_depth is all zeros
                if torch.sum(gt_depth) == 0:
                    losses['depth'] = torch.zeros_like(losses['rgb'])
                else:
                    losses['depth'] = robust_depth_loss(masked_pred_depth, masked_gt_depth, delta=0.1, reduction='none', robust=self.opt.robust)

                    if masked_confidence is not None and self.opt.conf_mean:
                        if aug_mask is None:
                            losses['depth'] = (losses['depth'] * masked_confidence).mean() # adjust
                        elif aug_mask is not None and aug_mask.sum() > 0:
                            losses['depth'] = (losses['depth'] * masked_confidence)[aug_mask[valid_mask]].mean() # adjust only for aug_mask
                        else:
                            losses['depth'] = torch.zeros_like(losses['rgb'])
                    else:
                        if global_step > self.decay_iter:
                            # depth supervision is switched off after decay
                            losses['depth'] = torch.zeros_like(losses['rgb'])
                        else:
                            losses['depth'] = torch.mean(losses['depth'])

            elif loss_type == 'empty':
                # NOTE(review): other branches read gt['depths']; confirm the
                # singular key 'depth' is intended here.
                depth_mask = gt['depth'] > 0.
                omega = self.opt.init_empty_omega * (1 - global_step/self.opt.iters) + 0.05 # use global step to decrease omega
                empty_mask = (gt_depth[depth_mask]-omega).unsqueeze(-1).repeat_interleave(z_vals.shape[-1],dim=-1) > z_vals[depth_mask[0,:,0]]
                # NOTE(review): `density` is not defined in this scope (likely
                # outputs['sigma']) — enabling this term would raise NameError.
                masked_density = density[depth_mask[0,:,0]].masked_select(empty_mask)
                loss_empty = empty_criterion(masked_density) if masked_density.abs().sum() > 0 else 0.
                losses['empty'] = loss_empty

            elif loss_type == 'eikonal':
                # unit-gradient-norm regularizer on SDF gradients
                grad_theta = outputs['grad_theta']
                eikonal_loss = get_eikonal_loss(grad_theta)
                losses['eikonal'] = eikonal_loss

            elif loss_type == 'sdf':
                sdf,z_vals = outputs['sigma'],outputs['z_vals'], # [N,T],[N,T]
                truncation = self.opt.truncation * self.opt.scale
                loss_front, loss_sdf = get_sdf_loss(z_vals, gt_depth[0], sdf, truncation, "l2") # B=1, [N_ray, N_sample]
                losses['front'],losses['sdf'] = loss_front, loss_sdf

            elif loss_type == 'reg': # loss for regularization
                # NOTE(review): `density` is not defined in this scope, and
                # get_cauchy_loss also expects a scale argument — verify before
                # enabling this term.
                losses['reg'] = get_cauchy_loss(density) # Cauchy loss only used in coarse sample stage !!!

            elif loss_type == 'mip':
                # NOTE(review): `m` and `interval` are not defined in this scope
                # (midpoints / sample intervals presumably come from outputs) —
                # enabling this term would raise NameError.
                losses['mip'] = cal_dist_loss(w, m, interval)

            elif loss_type == 'info':
                # use info loss reported in infoNeRF
                # NOTE(review): N_s is taken from pred_rgb.shape[1] (the color
                # channel count) — confirm this is the intended sample count.
                acc, alpha, N_s = outputs['acc'], outputs['alpha'], pred_rgb.shape[1]
                losses['info'] = get_info_loss(acc, alpha, N_s, True) # in this naive implementation, use all rays for computing info loss

            elif loss_type == "truncate":
                # this loss is a masked entropy loss. May change the weight during training
                sigma,z_vals,alpha = outputs['sigma'],outputs['z_vals'],outputs['alpha'] # [N,T],[N,T]
                # NOTE(review): outputs['alpha'] is passed into the `sigma`
                # parameter of get_truncated_loss and outputs['weights'] into
                # `weight` — confirm the intended argument mapping.
                losses['truncate'] = get_truncated_loss(z_vals, pred_depth, gt_depth, alpha, outputs['weights'],cur_iter=global_step,max_iter=1000)

            elif loss_type == "wentropy":
                losses['wentropy'] = get_wentropy_loss(outputs['weights'])

            elif loss_type == "region_depth":
                losses['region_depth'] = get_regioned_depth_loss(w, z_vals, pred_depth, gt_depth)

            elif loss_type == "varw":
                if global_step > int(self.opt.iters * 0.01): # just disable this loss at the beginning for better convergence
                    # augmask and check augmask is not all False
                    if aug_mask is not None and aug_mask.sum() > 0:
                        losses['varw'] = get_varw_loss(w, z_vals, pred_depth, confidence, enable=self.opt.conf_varw)[aug_mask.squeeze()].mean()
                    elif aug_mask is None:
                        losses['varw'] = get_varw_loss(w, z_vals, pred_depth, confidence, enable=self.opt.conf_varw).mean()
                    else:
                        losses['varw'] = torch.zeros_like(losses['rgb'])
                else:
                    losses['varw'] = torch.zeros_like(losses['rgb'])

            elif loss_type == "varc":
                if global_step > int(self.opt.iters * 0.01):
                    if aug_mask is not None and aug_mask.sum() > 0:
                        losses['varc'] = get_varc_loss(w, rgbs, pred_rgb, confidence, enable=self.opt.conf_varc)[aug_mask.squeeze()].mean()
                    elif aug_mask is None:
                        losses['varc'] = get_varc_loss(w, rgbs, pred_rgb, confidence, enable=self.opt.conf_varc).mean()
                    else:
                        losses['varc'] = torch.zeros_like(losses['rgb'])
                else:
                    losses['varc'] = torch.zeros_like(losses['rgb'])

            elif loss_type == "ssim":
                losses[loss_type] = ssim_loss(pred_rgb, gt_rgb, self.opt.patch_size)
                if pred_rgb0 is not None:
                    losses[loss_type] += ssim_loss(pred_rgb0, gt_rgb, self.opt.patch_size)

            elif loss_type == "tv":
                losses[loss_type] = tv_loss(pred_depth, patch_size = self.opt.patch_size).mean()

            else:
                raise NotImplementedError()
        # [debug]
        # print(losses,"\n")
        # import IPython; IPython.embed()
        return losses, self.sum_losses(losses, loss_weights), cur_psnr

    def get_loss_logger(self):
        # NOTE(review): self.loss_logger is never assigned in __init__ — this
        # raises AttributeError unless it is set externally.
        return self.loss_logger

    def get_total_loss(self):
        return self.loss_logger.get_total_loss()

    def get_depth_loss(self, pred_depth, gt_depth, loss_type='l1',confidence=None, global_step=0,robust=False):
        """Legacy masked depth loss (superseded by the inline 'depth' branch in forward)."""
        dummy_depth = not gt_depth.any() # dummy depth means gt_depth are all zeros
        if dummy_depth:
            return torch.tensor(0.0).type_as(pred_depth) # [B, N, 1] -> [1]

        elif self.opt.conf_mean and confidence is not None:
            depth_mask = gt_depth > 0

            eps = 1e-4
            num_pixel = torch.numel(depth_mask)
            num_valid = depth_mask.float().sum() + eps
            depth_valid_weight = 1

            if robust:
                depth_loss = robust_depth_loss(pred_depth[depth_mask], gt_depth[depth_mask], confidence=confidence[depth_mask]) * depth_valid_weight

            # NOTE(review): if robust is False in this branch, depth_loss is
            # never assigned and the return raises UnboundLocalError; also
            # robust_depth_loss has no `confidence` parameter — verify.
            return depth_loss
        else:
            # normal type l1 depth loss
            depth_mask = gt_depth > 0
            eps = 1e-4
            num_pixel = torch.numel(depth_mask)
            num_valid = depth_mask.float().sum() + eps
            depth_valid_weight = num_pixel / num_valid
            # if self.opt.decay_iter == 0:
            #     decay_step = [self.opt.iters//2] # decay depth loss [10000, 20000, 30000] 0.1
            # elif self.opt.decay_iter >= 1:
            #     decay_step = [self.opt.decay_iter]
            # else:
            #     decay_step = [1000000000000]

            if global_step > self.decay_iter:
                    # print("start decay depth loss!!!")
                    depth_valid_weight = 0

        if robust:
            depth_loss = robust_depth_loss(pred_depth * depth_mask, gt_depth * depth_mask) * depth_valid_weight
        else:
            depth_loss = compute_loss(pred_depth * depth_mask, gt_depth * depth_mask, loss_type) * depth_valid_weight

        return depth_loss

        # return get_region_depth_loss(pred_depth,gt_depth,loss_type) if not dummy_depth else torch.tensor(0.0).type_as(pred_depth) # [B, N, 1] -> [1]

    def sum_losses(self, losses, loss_weights=None):
        """Weighted sum of all loss terms; loss_weights must cover every key in losses."""
        all_loss = 0
        for key in losses.keys():
            all_loss += losses[key] * loss_weights[key]
        # # debug
        # # print loss items after plus weight
        # for key in losses.keys():
        #     print(key,losses[key] * loss_weights[key])

        return all_loss

    

'''
    code from neural RGBD
'''
def compute_loss(prediction, target, loss_type='L2'):
    """Photometric loss between prediction and target (code from neural RGBD).

    loss_type: 'L2' (MSE), 'L1' (smoothed MAE) or 'smoothL1' (Huber-style).
    """
    if loss_type == 'smoothL1':
        return F.smooth_l1_loss(prediction, target)
    if loss_type == 'L1':
        return img2mae(prediction, target)
    if loss_type == 'L2':
        return img2mse(prediction, target)
    raise Exception('Unsupported loss type')


def get_masks(z_vals, target_d, truncation):
    """Split samples into free-space / truncation-band masks around the GT depth.

    Returns (front_mask, sdf_mask, fs_weight, sdf_weight); the two weights
    balance the terms by their relative sample counts.
    """
    valid_mask = (target_d > 1e-3).float()                   # rays with usable depth
    front_mask = (z_vals < (target_d - truncation)).float()  # [N_ray, N_sample], before the band
    back_mask = (z_vals > (target_d + truncation)).float()   # [N_ray, N_sample], behind the band

    # samples inside the truncation band, on valid rays only
    sdf_mask = (1.0 - front_mask) * (1.0 - back_mask) * valid_mask  # [N_ray, N_sample]

    n_front = front_mask.sum()
    n_sdf = sdf_mask.sum()
    n_total = n_sdf + n_front
    fs_weight = 1.0 - n_front / n_total
    sdf_weight = 1.0 - n_sdf / n_total

    return front_mask, sdf_mask, fs_weight, sdf_weight

def compute_tv_norm(values, losstype='l2', weighting=None):  # pylint: disable=g-doc-args
    """Returns the TV norm over the last two spatial axes of `values` (jax).

    Note: The weighting / masking term was necessary to avoid degenerate
    solutions on GPU; only observed on individual DTU scenes.
    """
    center = values[:, :-1, :-1]
    shift_x = values[:, :-1, 1:]
    shift_y = values[:, 1:, :-1]

    if losstype == 'l2':
        tv = (center - shift_x) ** 2 + (center - shift_y) ** 2
    elif losstype == 'l1':
        tv = jnp.abs(center - shift_x) + jnp.abs(center - shift_y)
    else:
        raise ValueError('Not supported losstype.')

    if weighting is not None:
        tv = tv * weighting
    return tv


def get_sdf_loss(z_vals, target_d, predicted_sdf, truncation, loss_type):
    """Free-space and truncation-band SDF supervision (neural RGBD style)."""
    front_mask, sdf_mask, fs_weight, sdf_weight = get_masks(z_vals, target_d, truncation)

    # free space: the predicted sdf should saturate at +1 before the surface band
    fs_loss = compute_loss(predicted_sdf * front_mask,
                           torch.ones_like(predicted_sdf) * front_mask,
                           loss_type) * fs_weight
    # inside the band: z + sdf * truncation should land on the measured depth
    sdf_loss = compute_loss((z_vals + predicted_sdf * truncation) * sdf_mask,
                            target_d * sdf_mask,
                            loss_type) * sdf_weight

    return fs_loss, sdf_loss


def get_eikonal_loss(grad_theta):
    """Eikonal regularizer: SDF gradients should have unit L2 norm."""
    grad_norms = grad_theta.norm(2, dim=1)
    return ((grad_norms - 1) ** 2).mean()


def get_depth_loss(predicted_depth, target_d, loss_type='l2', **kwargs):
    """Masked depth loss, up-weighted by the inverse fraction of valid pixels.

    NOTE(review): the lowercase 'l2' default does not match compute_loss's
    'L2'/'L1' spellings — confirm callers pass an accepted loss_type.
    """
    valid = target_d > 0
    eps = 1e-4
    # re-weight so sparse depth supervision keeps its magnitude
    valid_weight = torch.numel(valid) / (valid.float().sum() + eps)

    return compute_loss(predicted_depth * valid, target_d * valid, loss_type) * valid_weight

def get_region_depth_loss(predicted_depth, target_d, loss_type='l1', region=0.1):
    """Depth loss restricted to predictions within +/- region of the GT depth.

    NOTE(review): the lowercase 'l1' default does not match compute_loss's
    'L1'/'L2' spellings — confirm callers pass an accepted loss_type.
    """
    valid = target_d > 0
    eps = 1e-4
    valid_weight = torch.numel(valid) / (valid.float().sum() + eps)

    # supervise only predictions inside the band around GT, on valid rays
    in_band = (predicted_depth > (target_d - region)) * (predicted_depth < (target_d + region))
    in_band = in_band * valid
    return compute_loss(predicted_depth * in_band, target_d * in_band, loss_type) * valid_weight


def search_for_tsdf(path):
    """Walk `path` recursively; return the first tsdf*.npz file found, else False."""
    for root, _dirs, files in os.walk(path):
        for name in files:
            if name.startswith('tsdf') and name.endswith('.npz'):
                return os.path.join(root, name)
    return False


def pred_load_tsdf(data_root):
    """Load a precomputed TSDF volume (.npz) from data_root.

    Looks for data_root/tsdf_vol.npz first, then falls back to the first
    tsdf*.npz found anywhere under data_root. Warns and exits if no volume
    exists.
    """
    def load_npz(data_path):
        # pickle disabled: the archive is plain arrays
        return np.load(data_path, allow_pickle=False)

    direct = os.path.join(data_root, 'tsdf_vol.npz')
    if os.path.exists(direct):
        return load_npz(direct)

    # Recursive fallback; searched once (the original walked the tree twice).
    found = search_for_tsdf(data_root)
    if found:
        return load_npz(found)

    import sys
    import warnings
    warnings.warn('[WARN] tsdf_vol.npz not found. Please run `scripts/make_init_tsdf_vol.sh` to generate it.')
    sys.exit()


def cal_dist_loss(w, m, interval):
    """Distortion loss via torch_efficient_distloss.

    For B rays with N sampled points each:
      w:        [B, N] volume-rendering weight of each point.
      m:        [B, N] midpoint distance of each point to the camera.
      interval: scalar or [B, N] query interval of each point.
    The distance in `m` may need normalization. [yc_debug]
    """
    return eff_distloss(w, m, interval)


@torch.jit.script
def sdf_from_tsdf_vol(xyz: torch.Tensor, tsdf_vol: torch.Tensor, voxel_size: torch.Tensor, voxel_origin: torch.Tensor,
        rescale: bool = False, mode: str = 'bilinear') -> torch.Tensor:
    """
    Use F.grid_sample to interpolate sdf values from tsdf_vol.
    Args:
        xyz: [Nr, Ns, 3] query points
        tsdf_vol: [D, D, D]
        voxel_size: 0-dim tensor, edge length of one voxel
        voxel_origin: [3] world-space origin of the volume
        rescale: map xyz from world coordinates into grid_sample's [-1, 1]
        mode: interpolation mode for grid_sample
    return:
        sdf: [Nr, Ns, 1]
    """
    dim = tsdf_vol.shape[1]
    # [yc_debug] cap the volume length at 2.0, since xyz lives in [-bound, bound]
    vol_length = min(voxel_size * dim, 2.0)

    if rescale:
        # Out-of-place ops: the original used -=, /= here, mutating the
        # caller's xyz tensor as a side effect.
        xyz = (xyz - voxel_origin) / vol_length  # move to [0, 1]
        xyz = xyz.clamp(0, 1)                    # clamp to [0, 1]
        xyz = 2 * xyz - 1                        # convert to [-1, 1] as grid_sample requires

    # xyz->zyx NOTE: In gridsample, XYZ<->zyx, [1, D, H, W, 3]
    xyz = xyz[:, :, [2, 1, 0]]  # [Nr, Ns, 3]
    xyz = xyz[None, None]       # [1, 1, Nr, Ns, 3]=>[B, D, H, W, 3]

    # convert tsdf_vol to [1, 1, X, Y, Z]
    tsdf_vol = tsdf_vol[None, None, :, :, :]

    # NOTE: squeeze() collapses ALL singleton dims, so Nr == 1 or Ns == 1
    # changes the rank here — behavior preserved from the original.
    sdf = F.grid_sample(tsdf_vol, xyz, padding_mode='border', mode=mode,
        align_corners=False).squeeze()  # [1, 1, 1, Nr, Ns] -> [Nr, Ns] NOTE: may use nearest mode instead

    return sdf.unsqueeze(-1)  # return [Nr, Ns, 1]


@torch.jit.script
def get_info_loss(acc:Tensor, alpha:Tensor, N_samples_for_info:int, computing_entropy_all:bool,type_:str='log2',acc_thres:float=0.1,entropy_log_scaling:bool=False):
    """Ray-entropy regularizer from InfoNeRF (see utils/loss.py in that repo).

    acc:   accumulated opacity per ray, used to mask rays that hit nothing.
    alpha: [Nr, Ns] per-sample alpha values.
    """
    # optionally keep only the trailing rays
    if not computing_entropy_all:
        acc = acc[N_samples_for_info:]
        alpha = alpha[N_samples_for_info:]

    # normalize the alphas into a per-ray probability distribution
    ray_prob = alpha / (torch.sum(alpha, -1).unsqueeze(-1) + 1e-10)

    if type_ == "log2":
        entropy_ray = -1 * ray_prob * torch.log2(ray_prob + 1e-10)
    elif type_ == "1-p":
        entropy_ray = ray_prob * torch.log2(1 - ray_prob)
    else:
        raise NotImplementedError

    per_ray_entropy = torch.sum(entropy_ray, -1)  # [Nr]

    # mask rays whose accumulated opacity falls below the threshold (no hit)
    if acc is not None:
        hit_mask = (acc > acc_thres).detach()
        per_ray_entropy = per_ray_entropy * hit_mask

    if entropy_log_scaling:
        return torch.log(torch.mean(per_ray_entropy) + 1e-10)
    return torch.mean(per_ray_entropy)

def get_tv_loss(values, losstype='l2', weighting=None):
    """TV norm over the last two spatial axes (torch version, from regnerf).

    Note: the weighting / masking term was necessary to avoid degenerate
    solutions on GPU; only observed on individual DTU scenes.
    """
    center = values[:, :-1, :-1]   # e.g. 64, 7, 7, 1
    shift_x = values[:, :-1, 1:]
    shift_y = values[:, 1:, :-1]

    if losstype == 'l2':
        tv = (center - shift_x) ** 2 + (center - shift_y) ** 2
    elif losstype == 'l1':
        tv = (center - shift_x).abs() + (center - shift_y).abs()
    else:
        raise ValueError('Not supported losstype.')

    if weighting is not None:
        tv = tv * weighting
    return tv


def compute_tvnorm_weight(step, max_step, weight_start=0.0, weight_end=0.0):
    """Linearly ramp a loss weight from weight_start to weight_end over max_step steps."""
    denom = max_step if max_step >= 1 else 1
    frac = min(max(step * 1.0 / denom, 0.0), 1.0)  # clamp progress to [0, 1]
    return weight_start * (1 - frac) + frac * weight_end


def get_truncated_loss(z_vals:Tensor, pred_depth:Tensor, depth:Tensor, sigma:Tensor, weight:Tensor, alpha:Optional[Tensor]=None,max_iter=-1,cur_iter=-1):
    """Masked depth loss: an empty loss ahead of the surface, an entropy loss
    inside the truncated region, and a (currently disabled) far regulation term."""
    valid = (depth > 0).detach()  # [1, Nr, 1]
    if not valid.any():
        # no depth supervision available on this batch
        return 0.0

    d_gt = depth[valid]                  # [Nr]
    d_pred = pred_depth[valid].detach()  # predicted depth on valid rays
    zs = z_vals[valid.squeeze()]
    dens = sigma[valid.squeeze()]

    front_mask, sdf_mask, back_mask, fs_weight, sdf_weight, bs_weight = get_truncated_region_mask(zs, d_pred, d_gt)

    # push density ahead of the surface toward zero
    front_loss = empty_criterion(dens * front_mask) * fs_weight

    # concentrate density inside the truncated region with an entropy penalty
    prob_sdf = (dens * sdf_mask) / (torch.sum(dens * sdf_mask, -1).unsqueeze(-1) + 1e-10)
    entropy_region_loss = (-1 * prob_sdf * torch.log2(prob_sdf + 1e-10)).sum(-1).mean() * sdf_weight

    # inverse-entropy term for back points kept disabled:
    # prob_back = (dens*back_mask)/(torch.sum(dens*back_mask,-1).unsqueeze(-1)+1e-10)
    # entropy_back_loss = (-1*prob_back*torch.log2(prob_back+1e-10)).sum(-1).mean() * bs_weight
    entropy_back_loss = 0.0  # back_mask * bs_weight

    # ramp the whole term in over training when max_iter is given
    if max_iter > 0:
        tr_w = compute_tvnorm_weight(cur_iter, max_iter, weight_start=0.0, weight_end=1.0)
    else:
        tr_w = 1.0
    # print(f"front_loss: {front_loss.item()}, entropy_region_loss: {entropy_region_loss.item()}, entropy_back_loss: {entropy_back_loss}, tr_w: {tr_w}")
    return tr_w * (front_loss + entropy_region_loss + entropy_back_loss)

def get_truncated_region_mask(z_vals, pred_depth, depth):
    """Partition samples into front / truncated-region / back of a depth band.

    The band spans [min(pred, gt) - interval, max(pred, gt) + interval], where
    interval = clamp(|gt - pred|, min=1/128); bounds are clipped to [0, 5].
    Returns the three masks plus their balancing weights.
    """
    interval = torch.clamp_min((depth - pred_depth).abs(), 1/128)             # [Nr]
    lower = (torch.min(pred_depth, depth).squeeze() - interval).clamp_min(0)  # [Nr]
    upper = (torch.max(pred_depth, depth).squeeze() + interval).clamp_max(5)  # [Nr]

    front_mask = z_vals < lower.unsqueeze(-1)
    back_mask = z_vals > upper.unsqueeze(-1)
    sdf_mask = ~(front_mask | back_mask)

    n_front = front_mask.sum()
    n_sdf = sdf_mask.sum()
    n_back = back_mask.sum()
    n_total = n_sdf + n_front  # back samples intentionally excluded; + n_back
    fs_weight = 1 - n_sdf / n_total   # (n_sdf+n_back)/n_total
    sdf_weight = 1 - n_front / n_total  # (n_front+n_back)/n_total
    bs_weight = 1.  # (n_sdf+n_front)/n_total

    # [debug]
    # print(f"num_fs_samples: {n_front}, num_sdf_samples: {n_sdf}, num_bs_samples: {n_back}")
    return front_mask, sdf_mask, back_mask, fs_weight, sdf_weight, bs_weight

def get_cauchy_loss(sigma, c=1.0):
    '''Cauchy sparsity loss on densities, from SNeRG.

    Args:
        sigma: density tensor.
        c: scale of the Cauchy kernel. A default is provided so the
           single-argument call site in this file stays valid —
           TODO(review): confirm the intended scale value.
    '''
    return torch.log(1 + (sigma / c) ** 2).sum()

def get_wentropy_loss(weight):
    '''Shannon entropy of the ray weight distribution, summed per ray then averaged.'''
    # -p*log2(p); the 1e-10 keeps log2 finite at p == 0
    plogp = -weight * torch.log2(weight + 1e-10)
    return plogp.sum(-1).mean()

def get_regioned_depth_loss(weights, z_vals, pred_depth, gt_depth, near=0.05, far=0.2):
    '''GNLL loss from dense depth priors, restricted to a valid region.

    NOTE(review): the `near`/`far` parameters are unused — the band bounds are
    hard-coded to (0.01, 0.2) below; confirm which values are intended.
    '''
    valid_mask = gt_depth > 0  # [1, Nr, 1]
    pred_mean = pred_depth[valid_mask]
    if valid_mask.sum() == 0:
        return torch.zeros((1,), device=pred_depth.device, requires_grad=True)

    # per-ray variance of the rendered depth under the weight distribution
    pred_var = ((z_vals[valid_mask.squeeze()] - pred_mean.unsqueeze(-1)).pow(2)
                * weights[valid_mask.squeeze()]).sum(-1) + 1e-5

    near_region, far_region = 0.01, 0.2  # [...|..iou..|.(sdf_depth).|..iou..|...]
    valid_grad_mask = find_ray_depth_in_region(near_region, far_region, pred_mean, gt_depth[valid_mask])

    # only rays whose prediction falls inside the valid band contribute
    regioned_loss = torch.nn.functional.gaussian_nll_loss(
        pred_mean[valid_grad_mask],
        gt_depth[valid_mask][valid_grad_mask],
        pred_var[valid_grad_mask],
        reduction='mean')
    return (valid_mask.sum() / weights.shape[0]) * regioned_loss

def is_not_in_expected_distribution(depth_mean, depth_var, depth_measurement_mean, depth_measurement_std):
    """True where the rendered depth is inconsistent with the measured distribution."""
    # mean deviates by more than one measured std
    off_center = ((depth_mean - depth_measurement_mean).abs() - depth_measurement_std) > 0.
    # rendered variance exceeds the measured variance
    too_spread = depth_measurement_std.pow(2) < depth_var
    return torch.logical_or(off_center, too_spread)

def find_ray_depth_in_region(region_near, region_far, pred_depth_mean, gt_depth):
    """Mask of rays whose predicted depth is neither too close to nor too far from GT."""
    # pred inside the tight band around gt
    too_near = torch.logical_and(pred_depth_mean > gt_depth - region_near,
                                 pred_depth_mean < gt_depth + region_near)
    # pred outside the wide band around gt
    too_far = torch.logical_or(pred_depth_mean < gt_depth - region_far,
                               pred_depth_mean > gt_depth + region_far)
    # keep rays that are neither near nor far
    return torch.logical_not(torch.logical_or(too_near, too_far))

def hard_loss(weights):
    """Hard loss from lolnerf: -log(exp(-|x|) + exp(1-|x|)), summed per ray then averaged."""
    mag = weights.abs()
    return -torch.log(torch.exp(-mag) + torch.exp(1 - mag)).sum(-1).mean()

def get_varw_loss(weights, z_vals, pred_depth, confidence=None, enable=True):
    '''Per-ray variance of sample depths around the rendered depth, optionally confidence-scaled.'''
    variance = ((z_vals - pred_depth).pow(2) * weights).sum(-1)
    if confidence is not None and enable is not False:
        variance = variance * confidence.squeeze()
    return variance + 1e-10

def get_varc_loss(w, rgbs, pred_rgb, confidence=None, enable=True):
    '''Per-ray color variance around the rendered color, weighted by detached weights.'''
    sq_diff = (rgbs - pred_rgb.unsqueeze(1)).pow(2)  # a simple l2 distance per sample
    variance = (sq_diff * w.detach().unsqueeze(-1)).sum(-2).sum(-1)
    if confidence is not None and enable is not False:
        variance = variance * confidence.squeeze()
    return variance + 1e-10

def get_varc_redmean_loss(w, rgbs, pred_rgb):
    '''Variance-of-rgb loss using the "redmean" rgb distance. Unimplemented placeholder.'''
    # TODO: implement; currently a no-op that returns None.
    pass

# def robust_depth_loss(pred_d, sdf_d, delta=0.1):
#     '''robust loss for inaccurate depth, contains near and far part'''
#     distance = (pred_d - sdf_d).abs()
#     delta_mask = (distance <= delta).float()
#     near_loss = 0.5 * (distance ** 2)
#     far_loss = delta ** 2 * (0.5 + torch.log(distance / delta))
#     # form a mask to select the near loss or far loss
#     loss = delta_mask * near_loss + (1-delta_mask) * far_loss
#     return loss.mean()

def robust_depth_loss(input, target, delta=0.1, reduction='none', robust=True):
    '''Robust (Huber-like) loss for inaccurate depth supervision.

    Quadratic inside the near region (|input - target| < delta) and
    logarithmic outside it, which down-weights large depth errors
    compared to plain MSE. The two branches are continuous at
    |err| == delta (both evaluate to ~0.5 * delta).

    Args:
    - input (tensor): predicted depth map
    - target (tensor): ground truth depth map
    - delta (float): threshold separating the near (quadratic) and far
      (logarithmic) regions; values below 1e-5 fall back to an L1 loss
    - reduction (str): 'none', 'mean' or 'sum'
    - robust (bool): if False, use plain MSE with the same reduction

    Returns:
    - loss (tensor): the computed loss value

    Raises:
    - ValueError: if reduction is not 'none', 'mean' or 'sum'
    - RuntimeError: if the computed loss contains NaN values
    '''
    if robust:
        n = torch.abs(input - target)
        if delta < 1e-5:
            # Degenerate delta: plain absolute difference.
            loss = n
        else:
            cond = n < delta
            # The +1e-5 keeps log() finite where n == 0 (torch.where
            # evaluates both branches everywhere).
            loss = torch.where(cond,
                               0.5 * (input - target) ** 2 / delta,
                               delta * (0.5 + torch.log(n / delta + 1e-5)))

        # Apply reduction mode to the computed loss
        if reduction == "mean":
            # mean() of an empty tensor is NaN; return a 0 that keeps grad.
            loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum()
        elif reduction == "sum":
            loss = loss.sum()
        elif reduction == "none":
            pass
        else:
            raise ValueError("Invalid reduction mode: {}".format(reduction))
    else:
        # Use mean squared error loss if robust is False
        loss = torch.nn.functional.mse_loss(input, target, reduction=reduction)

    # Fail loudly instead of silently training on NaN losses.
    # (Replaces the previous debug print + exit(), which killed the whole
    # process and spammed stdout on every call.)
    if torch.isnan(loss).any():
        raise RuntimeError("NaN encountered in robust_depth_loss")

    return loss


def get_loss_type(opt):
    """Map option flags to a loss-type string.

    opt.L1 takes precedence over opt.smoothL1; when neither flag is set
    the default is "L2".
    """
    if opt.L1:
        return "L1"
    return "smoothL1" if opt.smoothL1 else "L2"

from pytorch_msssim import MS_SSIM, SSIM, ms_ssim, ssim

# X: (N,3,H,W) a batch of non-negative RGB images (0~255)
# Y: (N,3,H,W) 
def ssim_loss(image, gt, patch_size=8):
    """SSIM loss (1 - SSIM) over square ray patches.

    Both inputs are flat [N, 3] per-ray colors sampled in contiguous
    patch_size x patch_size patches; they are regrouped into
    [num_patch, patch_size, patch_size, 3] before SSIM is computed.

    NOTE(review): patches are passed channels-last; pytorch_msssim's
    ssim() documents NCHW input -- confirm the layout at the call site.
    """
    side = patch_size
    n_patches = image.shape[0] // (side * side)
    pred_patches = image.reshape(n_patches, side, side, 3)
    gt_patches = gt.reshape(n_patches, side, side, 3)
    return 1 - ssim(pred_patches, gt_patches, data_range=1, size_average=True, win_size=3)


def tv_loss(values, losstype='l2', patch_size=8):
    """Total-variation penalty over square patches of per-ray values.

    Note: the weighting / masking term was necessary to avoid degenerate
    solutions on GPU; only observed on individual DTU scenes.

    Args:
        values: flat [N] tensor (e.g. per-ray depths) sampled in
            contiguous patch_size x patch_size patches; N must be
            divisible by patch_size**2.
        losstype: 'l1' or 'l2' norm.
        patch_size: side length of each square patch.

    Returns:
        [num_patch, patch_size-1, patch_size-1, 1] unreduced TV penalty.

    Raises:
        ValueError: for an unsupported losstype.
    """
    n_patches = values.shape[0] // (patch_size * patch_size)
    patches = values.reshape(n_patches, patch_size, patch_size, 1)

    center = patches[:, :-1, :-1]
    right = patches[:, :-1, 1:]
    below = patches[:, 1:, :-1]

    if losstype == 'l1':
        return torch.abs(center - right) + torch.abs(center - below)
    if losstype == 'l2':
        return (center - right) ** 2 + (center - below) ** 2
    raise ValueError('Not supported losstype.')


if __name__ == "__main__":
    # Smoke test for tv_loss. The previous example called an undefined
    # name (compute_tv_norm) with a 4-D tensor, which raised NameError
    # when the module was run as a script; tv_loss actually takes a flat
    # [N] tensor grouped into patch_size x patch_size patches.
    values = torch.randn(2 * 8 * 8)
    tv_norm = tv_loss(values, losstype='l2', patch_size=8)
    print(tv_norm.shape)  # output: torch.Size([2, 7, 7, 1])