import torch
import torch.nn as nn
from utils.config import BATCH_SIZE
from core.block import ResBlock, ThreeDBlock

# Neural network: GC-Net style stereo disparity estimation model
class GC_NET(nn.Module):
    """End-to-end stereo disparity network.

    Extracts shared 2D features from a rectified left/right image pair,
    builds a concatenation-based 4D cost volume, filters it with a 3D
    convolutional encoder-decoder (with residual skip connections), and
    regresses a dense disparity map by soft-argmin over the resulting
    probability volume.

    NOTE(review): the layer numbering (conv0..conv36) appears to follow
    the GC-Net paper (Kendall et al., 2017) — confirm against the paper.
    """

    def __init__(self, res_block, threed_block, height, width, max_disp):
        """
        Args:
            res_block: 2D residual block class, instantiated as
                ``res_block(in_channels, out_channels)``.
            threed_block: 3D conv block class with the same constructor
                signature.
            height: nominal input image height (stored, not used to resize).
            width: nominal input image width (stored, not used to resize).
            max_disp: maximum disparity to consider; assumed divisible by 2
                (the cost volume is built at half resolution).
        """
        super(GC_NET, self).__init__()
        self.height = height
        self.width = width
        self.maxdisp = max_disp

        # Shared unary feature tower: 5x5 stride-2 conv -> 32ch, H/2 x W/2.
        self.conv0 = torch.nn.Sequential(
            torch.nn.Conv2d(3, 32, 5, 2, 2),
            torch.nn.BatchNorm2d(32),
            torch.nn.ReLU()
        )

        # 8 stacked residual blocks, 32 -> 32 channels, at H/2 x W/2.
        self.res_block_1_16 = self._make_block(res_block, 32, 32, 8)

        # Final 2D conv before the cost volume (deliberately no BN/ReLU).
        self.conv17 = nn.Conv2d(32, 32, 3, 1, 1)

        # 3D filtering of the raw 64-channel cost volume at D/2 x H/2 x W/2.
        self.threed_conv18 = torch.nn.Sequential(
            torch.nn.Conv3d(64, 32, 3, 1, 1),
            torch.nn.BatchNorm3d(32),
            torch.nn.ReLU()
        )
        self.threed_conv19 = torch.nn.Sequential(
            torch.nn.Conv3d(32, 32, 3, 1, 1),
            torch.nn.BatchNorm3d(32),
            torch.nn.ReLU()
        )

        # Encoder: stride-2 3D convs, each halving D, H and W.
        self.threed_conv20 = torch.nn.Sequential(  # -> D/4 x H/4 x W/4
            torch.nn.Conv3d(64, 64, 3, 2, 1),
            torch.nn.BatchNorm3d(64),
            torch.nn.ReLU()
        )
        self.threed_conv23 = torch.nn.Sequential(  # -> D/8 x H/8 x W/8
            torch.nn.Conv3d(64, 64, 3, 2, 1),
            torch.nn.BatchNorm3d(64),
            torch.nn.ReLU()
        )
        self.threed_conv26 = torch.nn.Sequential(  # -> D/16 x H/16 x W/16
            torch.nn.Conv3d(64, 64, 3, 2, 1),
            torch.nn.BatchNorm3d(64),
            torch.nn.ReLU()
        )

        # Parallel 3D conv blocks whose outputs feed the decoder skips.
        self.threed_block_20_22 = self._make_block(threed_block, 64, 64, 1)   # at D/4 scale
        self.threed_block_23_25 = self._make_block(threed_block, 64, 64, 1)   # at D/8 scale
        self.threed_block_26_28 = self._make_block(threed_block, 64, 64, 1)   # at D/16 scale
        self.threed_block_29_31 = self._make_block(threed_block, 64, 128, 1)  # bottleneck, 64 -> 128 channels

        # Decoder: transposed 3D convs, each doubling D, H and W.
        # ReLU is applied in forward() AFTER adding the skip connection.
        self.trans_conv32 = torch.nn.Sequential(  # 128 -> 64, to D/16 scale
            torch.nn.ConvTranspose3d(128, 64, 3, 2, 1, 1),
            torch.nn.BatchNorm3d(64),
        )
        self.trans_conv33 = torch.nn.Sequential(  # 64 -> 64, to D/8 scale
            torch.nn.ConvTranspose3d(64, 64, 3, 2, 1, 1),
            torch.nn.BatchNorm3d(64),
        )
        self.trans_conv34 = torch.nn.Sequential(  # 64 -> 64, to D/4 scale
            torch.nn.ConvTranspose3d(64, 64, 3, 2, 1, 1),
            torch.nn.BatchNorm3d(64),
        )
        self.trans_conv35 = torch.nn.Sequential(  # 64 -> 32, to D/2 scale
            torch.nn.ConvTranspose3d(64, 32, 3, 2, 1, 1),
            torch.nn.BatchNorm3d(32),
        )
        self.relu = torch.nn.ReLU()

        # Final transposed conv to a single-channel cost at full D x H x W.
        self.trans_conv36 = torch.nn.ConvTranspose3d(32, 1, 3, 2, 1, 1)

        self.regression = DisparityRegression(max_disp)  # soft-argmin readout

    def forward(self, img_left, img_right):
        """Return a [B, H, W] disparity map for a rectified image pair."""
        # BUG FIX: derive the batch size from the input tensor instead of
        # the global BATCH_SIZE constant, so the last (smaller) batch of an
        # epoch and arbitrary inference batch sizes reshape correctly.
        original_size = [img_left.size(0), self.maxdisp,
                         img_left.size(2), img_left.size(3)]

        # Shared-weight feature extraction for both views.
        imgl_conv0_out = self.conv0(img_left)
        imgr_conv0_out = self.conv0(img_right)

        imgl_res_block_1_16_out = self.res_block_1_16(imgl_conv0_out)
        imgr_res_block_1_16_out = self.res_block_1_16(imgr_conv0_out)

        imgl_conv17_out = self.conv17(imgl_res_block_1_16_out)
        imgr_conv17_out = self.conv17(imgr_res_block_1_16_out)

        # Concatenation cost volume: [B, 64, D/2, H/2, W/2].
        cost_volum = self.cost_volume(imgl_conv17_out, imgr_conv17_out)

        threed_conv18_out = self.threed_conv18(cost_volum)
        threed_conv19_out = self.threed_conv19(threed_conv18_out)

        # Encoder path; the parallel blocks produce the skip tensors.
        threed_block_20_22_out = self.threed_block_20_22(cost_volum)
        threed_conv20_out = self.threed_conv20(cost_volum)
        threed_block_23_25_out = self.threed_block_23_25(threed_conv20_out)
        threed_conv23_out = self.threed_conv23(threed_conv20_out)
        threed_block_26_28_out = self.threed_block_26_28(threed_conv23_out)
        threed_conv26_out = self.threed_conv26(threed_conv23_out)
        threed_block_29_31_out = self.threed_block_29_31(threed_conv26_out)

        # Decoder: transposed conv + residual skip connection, then ReLU.
        trans_conv32_out = self.relu(self.trans_conv32(threed_block_29_31_out) + threed_block_26_28_out)
        trans_conv33_out = self.relu(self.trans_conv33(trans_conv32_out) + threed_block_23_25_out)
        trans_conv34_out = self.relu(self.trans_conv34(trans_conv33_out) + threed_block_20_22_out)
        trans_conv35_out = self.relu(self.trans_conv35(trans_conv34_out) + threed_conv19_out)

        trans_conv36_out = self.trans_conv36(trans_conv35_out)

        # Drop the singleton channel dim: [B, 1, D, H, W] -> [B, D, H, W].
        Cd = trans_conv36_out.view(original_size)
        # Lower cost -> higher probability; softmax over the disparity dim.
        prob_volume = torch.softmax(-Cd, 1)

        # Regress expected disparity: [B, H, W].
        disp = self.regression(prob_volume)

        return disp

    def _make_block(self, block, in_channels, out_channels, block_num):
        """Stack ``block_num`` instances of ``block`` into a Sequential.

        Note: every instance is built with the same (in, out) channel pair,
        so ``in_channels != out_channels`` only makes sense for
        ``block_num == 1``.
        """
        return nn.Sequential(*[block(in_channels, out_channels)
                               for _ in range(block_num)])

    def cost_volume(self, img_left, img_right):
        """Build a concatenation cost volume from left/right feature maps.

        For each candidate disparity ``i`` in ``[0, max_disp // 2)`` the
        left features are paired (channel-concatenated) with the right
        features shifted ``i`` pixels; columns with no valid match stay 0.

        Args:
            img_left: [B, C, H, W] left feature map.
            img_right: [B, C, H, W] right feature map.

        Returns:
            [B, 2C, max_disp // 2, H, W] cost volume on the same
            device/dtype as the inputs.
        """
        batch, channel, height, width = img_left.size()
        cost_vol = torch.zeros(batch, channel * 2, self.maxdisp // 2,
                               height, width).type_as(img_left)
        for i in range(self.maxdisp // 2):
            if i > 0:
                cost_vol[:, :channel, i, :, i:] = img_left[:, :, :, i:]
                cost_vol[:, channel:, i, :, i:] = img_right[:, :, :, :-i]
            else:
                cost_vol[:, :channel, i, :, :] = img_left
                cost_vol[:, channel:, i, :, :] = img_right
        return cost_vol

# Disparity regression (soft-argmin over the probability volume)
class DisparityRegression(nn.Module):
    """Soft-argmin readout: expected disparity under a probability volume."""

    def __init__(self, max_disp):
        super().__init__()
        # Candidate disparity values 0..max_disp-1, shaped [1, D, 1, 1]
        # so they broadcast against a [B, D, H, W] probability volume.
        self.disp_score = torch.arange(max_disp).view(1, max_disp, 1, 1)

    def forward(self, prob_volume):
        # Match the probability volume's shape, dtype and device.
        candidates = self.disp_score.expand_as(prob_volume).type_as(prob_volume)
        # Expectation over the disparity dimension: [B, D, H, W] -> [B, H, W].
        return (candidates * prob_volume).sum(dim=1)

def GCNET(height, width, max_disp):
    """Build a GC_NET wired with the default ResBlock / ThreeDBlock classes."""
    model = GC_NET(ResBlock, ThreeDBlock, height, width, max_disp)
    return model
