'''
原版：基础模型为DepthFormer，环境需要mmcv(torch1.9.0+cu111)
'''
import torch
import torch.nn as nn
import torch.nn.functional as F

from .mde_ops import resize
from .mde_encoder_swin import DepthFormerSwin
from .mde_decoder import DenseDepthHead
from .mde_neck_hahi import HAHIHeteroNeck
from .common import *

class CSPNAccelerate(nn.Module):
    """One step of convolutional spatial propagation (CSPN).

    Unfolds the current depth map into k*k neighbourhoods, pins each
    neighbourhood centre to the initial depth prediction, and blends the
    neighbours with per-pixel affinity weights from the guidance map.
    """

    def __init__(self, kernel_size):
        super().__init__()
        self.kernel_size = kernel_size  # propagation window size k

    def forward(self, kernel, input, input0):
        """Run one propagation step.

        kernel: guidance/affinity map, [B, k*k, H, W]
        input:  current depth map,     [B, 1, H, W]
        input0: initial depth map,     [B, 1, H, W]
        Returns the refined depth,     [B, 1, H, W].
        """
        batch = input.size()[0]
        height, width = input.size()[2], input.size()[3]
        taps = self.kernel_size * self.kernel_size

        # Unfold k*k windows around every pixel -> [B, k*k, H*W].
        # F.unfold positional args here: dilation=1, padding=k//2, stride=1.
        windows = F.unfold(input, self.kernel_size, 1, self.kernel_size // 2, 1)

        # Affinity weights flattened to match the unfolded layout.
        affinity = kernel.reshape(batch, taps, height * width)

        # Replace the centre tap of every window with the initial depth,
        # anchoring propagation to the initial prediction.
        centre = int((taps - 1) / 2)
        windows[:, centre:centre + 1, :] = input0.view(batch, 1, height * width)

        # Weighted sum over the window taps: sum(window * affinity), [B, H*W].
        refined = (windows * affinity).sum(dim=1)
        return refined.view(batch, 1, height, width)

class depthprompting(nn.Module):
    """Depth completion by prompting a monocular depth model.

    A frozen (or bias-tuned) monocular network (DepthFormer Swin backbone +
    HAHI neck + DenseDepth head) predicts a relative depth map. The relative
    depth is affinely rescaled to the sparse measurements via least squares,
    then refined by CSPN spatial propagation whose affinity and confidence
    maps come from a sparse-depth encoder fused with the frozen image
    features in a U-Net decoder.
    """

    def __init__(self, args):
        super(depthprompting, self).__init__()

        self.args = args
        # CSPN propagation layer; the affinity map has prop_kernel**2 channels.
        self.prop_layer = CSPNAccelerate(args.prop_kernel)
        self.num_neighbors = self.args.prop_kernel * self.args.prop_kernel

        # Sparse-depth encoder: 1-channel stem conv + ResNet-34 stages
        # (pretrained unless args.no_res_pre is set).
        self.conv1_dep = conv_bn_relu(1, 64, kernel=3, stride=1, padding=1, bn=False)
        net = get_resnet34(not self.args.no_res_pre)

        self.conv2 = net.layer1  # [B,64,H,W]
        self.conv3 = net.layer2  # [B,128,H/2,W/2]
        self.conv4 = net.layer3  # [B,256,H/4,W/4]
        self.conv5 = net.layer4  # [B,512,H/8,W/8]

        del net  # only the four residual stages are kept

        self.conv6 = conv_bn_relu(512, 512, kernel=3, stride=2, padding=1)
        self.conv7 = conv_bn_relu(512, 512, kernel=3, stride=2, padding=1)

        # U-Net decoder; input channels = frozen image feature + skip connection(s).
        self.dec6 = convt_bn_relu(1536+512, 512, kernel=3, stride=2, padding=1, output_padding=1)
        self.dec5 = convt_bn_relu(768+512+512, 512, kernel=3, stride=2, padding=1, output_padding=1)
        self.dec4 = convt_bn_relu(384+512+512, 512, kernel=3, stride=2, padding=1, output_padding=1)
        self.dec3 = convt_bn_relu(192+512+256, 256, kernel=3, stride=2, padding=1, output_padding=1)
        self.dec2 = convt_bn_relu(64+256+128, 128, kernel=3, stride=2, padding=1, output_padding=1)

        # Guidance (affinity) head for CSPN — no BN/ReLU on the final layer.
        self.gd_dec1 = conv_bn_relu(128+64, 64, kernel=3, stride=1, padding=1)
        self.gd_dec0 = conv_bn_relu(64+64, self.num_neighbors, kernel=3, stride=1, padding=1, bn=False, relu=False)

        # Optional confidence head: one sigmoid-activated channel.
        if self.args.conf_prop:
            self.cf_dec1 = conv_bn_relu(128+64, 32, kernel=3, stride=1, padding=1)
            self.cf_dec0 = nn.Sequential(
                nn.Conv2d(32+64, 1, kernel_size=3, stride=1, padding=1),
                nn.Sigmoid()
            )

        # NOTE: a dead loop that collected trainable parameters into an unused
        # local list was removed here.

        # Monocular depth model (DepthFormer) providing the relative depth prior.
        self.backbone = DepthFormerSwin()
        self.neck = HAHIHeteroNeck()
        self.decode_head = DenseDepthHead(max_depth=args.max_depth)
        self.align_corners = True

        if self.args.use_bias_tuning:
            # Bias tuning: only the bias terms of the monocular model stay trainable.
            print("Bias Tuning !")
            for name, var in self.backbone.named_parameters():
                if not 'bias' in name:
                    var.requires_grad = False
            for name, var in self.neck.named_parameters():
                if not 'bias' in name:
                    var.requires_grad = False
            for name, var in self.decode_head.named_parameters():
                if not 'bias' in name:
                    var.requires_grad = False
        else:
            # Freeze the monocular depth model entirely.
            print("Monocular Depth Model FREEZE !!")
            for name, var in self.backbone.named_parameters():
                var.requires_grad = False
            for name, var in self.neck.named_parameters():
                var.requires_grad = False
            for name, var in self.decode_head.named_parameters():
                var.requires_grad = False

    def _concat(self, fd, fe, dim=1):
        """Concatenate decoder feature ``fd`` with encoder feature ``fe`` along
        ``dim``, cropping fd's bottom/right rows/cols when it is spatially
        larger (transposed convs can overshoot by a pixel).

        NOTE(review): the case where fd is *smaller* than fe is not handled —
        presumably it never occurs with the strides used here; confirm.
        """
        _, _, Hd, Wd = fd.shape
        _, _, He, We = fe.shape

        if Hd > He:
            h = Hd - He
            fd = fd[:, :, :-h, :]

        if Wd > We:
            w = Wd - We
            fd = fd[:, :, :, :-w]

        f = torch.cat((fd, fe), dim=dim)

        return f

    def forward(self, sample):
        """Run depth completion.

        sample: dict with 'rgb' image and 'dep' sparse depth [B,1,H,W].
        Returns a dict: refined depth 'pred', rescaled initial depth
        'pred_init', raw relative depth 'rel', per-iteration results
        'pred_inter', CSPN affinity 'guidance', and 'confidence'
        (None when conf_prop is off).
        """
        # Frozen monocular branch: multi-scale features -> relative depth.
        # Feature pyramid channels: 64, 192, 384, 768, 1536 (stride 2..32).
        x = self.backbone(sample['rgb'])
        x = self.neck(x)
        x_freeze_features = x  # kept as skip connections for the fusion decoder
        x = self.decode_head(x)  # relative depth, [B,1,h,w]
        # Align the relative depth to the sparse-depth resolution.
        out = resize(input=x, size=sample['dep'].shape[2:], mode='bilinear', align_corners=self.align_corners)
        # Clamp so the least-squares fit below cannot hit 0/inf and produce NaNs.
        out = torch.clamp(out, min=1e-3, max=1e3)

        dep = sample['dep']  # sparse depth, [B,1,H,W]
        # Drop sparse points where the relative depth is >= 255 (sky prior).
        dep = dep * (out < 255)
        if dep.sum()==0.:
            # No usable sparse point: return the clamped relative depth as-is.
            out = torch.clamp(out, min=self.args.min_depth, max=self.args.max_depth)
            output = {'pred': out, 'pred_init': out, 'rel': out, 'pred_inter': None, 'guidance': None, 'confidence': None}
            return output

        # Affine rescaling of the relative depth onto the sparse measurements.
        if self.args.init_scailing:
            pred_init = out.detach().clone()  # [B,1,H,W]
            for i in range(pred_init.shape[0]):  # per-sample least squares
                mask_p = (pred_init[i] < 255)  # non-sky mask, [1,H,W]
                dep_ = dep[i]  # sparse depth, [1,H,W]
                # Flattened indices of valid sparse measurements, [n,1].
                idx_nnz = torch.nonzero(dep_.view(-1) > 0.0001, as_tuple=False)
                B = dep_.view(-1)[idx_nnz]          # sparse depth at valid points, [n,1]
                A = pred_init[i].view(-1)[idx_nnz]  # relative depth at the same points, [n,1]
                num_dep = A.shape[0]
                if num_dep < 16:  # too few points for a stable fit — skip
                    continue

                # Append a ones column so the fit solves depth = w*rel + b, [n,2].
                A = torch.cat((A, torch.ones(num_dep, 1).to(A)), dim=1)
                # lstsq runs on CPU: the CUDA path raised
                # CUSOLVER_STATUS_INTERNAL_ERROR on this setup.
                X = torch.linalg.lstsq(A.cpu(), B.cpu()).solution  # X = [scale, offset]
                X = X.to(pred_init)
                pred_init[i] = pred_init[i] * X[0] + X[1]  # initial depth = w*rel + b

                if pred_init[i].max() < self.args.max_depth:
                    # Push the sky region up to max_depth.
                    pred_init[i] = mask_p * pred_init[i] + torch.logical_not(mask_p) * self.args.max_depth

        else:
            pred_init = out  # no rescaling: initial depth = relative depth

        pred_init = torch.clamp(pred_init, min=self.args.min_depth, max=self.args.max_depth)

        # Sparse-depth encoder (stem + ResNet-34 stages + two stride-2 convs).
        fe1 = self.conv1_dep(dep)  # [B,64,H,W]
        fe2 = self.conv2(fe1)      # [B,64,H,W]
        fe3 = self.conv3(fe2)      # [B,128,H/2,W/2]
        fe4 = self.conv4(fe3)      # [B,256,H/4,W/4]
        fe5 = self.conv5(fe4)      # [B,512,H/8,W/8]
        fe6 = self.conv6(fe5)      # [B,512,H/16,W/16]
        fe7 = self.conv7(fe6)      # [B,512,H/32,W/32]

        # U-Net decoder fusing frozen image features with depth features.
        fd6 = self.dec6(self._concat(x_freeze_features[4], fe7))                     # -> [B,512,H/16,W/16]
        fd5 = self.dec5(self._concat(x_freeze_features[3], self._concat(fd6, fe6)))  # -> [B,512,H/8,W/8]
        fd4 = self.dec4(self._concat(x_freeze_features[2], self._concat(fd5, fe5)))  # -> [B,512,H/4,W/4]
        fd3 = self.dec3(self._concat(x_freeze_features[1], self._concat(fd4, fe4)))  # -> [B,256,H/2,W/2]
        fd2 = self.dec2(self._concat(x_freeze_features[0], self._concat(fd3, fe3)))  # -> [B,128,H,W]

        gd_fd1 = self.gd_dec1(self._concat(fd2, fe2))   # [B,64,H,W]
        guide = self.gd_dec0(self._concat(gd_fd1, fe1))  # affinity map, [B,k*k,H,W]

        # Confidence map from the same fused features (sigmoid-activated).
        if self.args.conf_prop:
            cf_fd1 = self.cf_dec1(self._concat(fd2, fe2))        # [B,32,H,W]
            confidence = self.cf_dec0(self._concat(cf_fd1, fe1))  # [B,1,H,W]
        else:
            confidence = None

        depth = pred_init  # propagation starts from the initial depth
        sparse_dep = dep
        # sign(): +1 / -1 / 0 — sparse depths are presumably non-negative,
        # making this a 0/1 validity mask.
        sparse_mask = sparse_dep.sign()
        pred_inter = [pred_init]  # intermediate propagation results

        if self.args.conf_prop:
            sparse_dep = sparse_dep * confidence  # down-weight unreliable points

        # Normalise the affinity map channel-wise by its L1 norm.
        guide_sum = torch.sum(guide.abs(), dim=1, keepdim=True)  # [B,1,H,W]
        guide = torch.div(guide, guide_sum)

        # CSPN spatial propagation; the sparse replacement order differs per dataset.
        if self.args.data_name in ['NYU', 'IPAD', 'VOID', 'SUNRGBD']:
            for i in range(self.args.prop_time):
                depth = self.prop_layer(guide, depth, pred_init)
                depth = sparse_dep * sparse_mask + (1 - sparse_mask) * depth
                pred_inter.append(depth)

        elif self.args.data_name in ['KITTIDC', 'NUSCENE']:
            for i in range(self.args.prop_time):
                depth = sparse_dep * sparse_mask + (1 - sparse_mask) * depth
                depth = self.prop_layer(guide, depth, pred_init)
                pred_inter.append(depth)

        elif self.args.data_name in ['WaterScenes']:
            for i in range(self.args.prop_time):
                # Stamping sparse values in directly may propagate sensor noise.
                depth = sparse_mask * sparse_dep + (1 - sparse_mask) * depth
                depth = self.prop_layer(guide, depth, pred_init)
                pred_inter.append(depth)

        depth = torch.clamp(depth, min=0)  # no negative depths

        output = {'pred': depth, 'pred_init': pred_init, 'rel': out, 'pred_inter': pred_inter, 'guidance': guide, 'confidence': confidence}

        return output
