from __future__ import print_function
import torch
import torch.nn as nn
import torch.utils.data
from torch.autograd import Variable
import torch.nn.functional as F
import math
from .submodule import *

# Stacked-hourglass 3D convolution module with intermediate (relay) supervision
class hourglass(nn.Module):
    """One hourglass of stacked 3D convolutions with skip connections.

    Encodes the cost volume down two scale levels and decodes back up,
    exchanging skip tensors (``presqu``/``postsqu``) with sibling hourglass
    modules for intermediate supervision.
    """

    def __init__(self, inplanes):
        super(hourglass, self).__init__()

        # Encoder stage 1: inplanes -> inplanes*2 channels, spatial size /2.
        self.conv1 = nn.Sequential(
            convbn_3d(inplanes, inplanes * 2, kernel_size=3, stride=2, pad=1),
            nn.ReLU(inplace=True))

        # Channels and size unchanged; the ReLU is applied in forward()
        # only after the optional skip addition.
        self.conv2 = convbn_3d(inplanes * 2, inplanes * 2, kernel_size=3, stride=1, pad=1)

        # Encoder stage 2: channels unchanged, spatial size /2 again.
        self.conv3 = nn.Sequential(
            convbn_3d(inplanes * 2, inplanes * 2, kernel_size=3, stride=2, pad=1),
            nn.ReLU(inplace=True))

        # Refinement at the coarsest scale; channels and size unchanged.
        self.conv4 = nn.Sequential(
            convbn_3d(inplanes * 2, inplanes * 2, kernel_size=3, stride=1, pad=1),
            nn.ReLU(inplace=True))

        # Decoder stage 1: transposed conv, spatial size *2, channels unchanged.
        # Output is summed with a skip tensor before the ReLU in forward().
        self.conv5 = nn.Sequential(
            nn.ConvTranspose3d(inplanes * 2, inplanes * 2, kernel_size=3, padding=1,
                               output_padding=1, stride=2, bias=False),
            nn.BatchNorm3d(inplanes * 2))

        # Decoder stage 2: inplanes*2 -> inplanes channels, spatial size *2.
        # The caller adds this output to its own residual (x).
        self.conv6 = nn.Sequential(
            nn.ConvTranspose3d(inplanes * 2, inplanes, kernel_size=3, padding=1,
                               output_padding=1, stride=2, bias=False),
            nn.BatchNorm3d(inplanes))

    def forward(self, x, presqu, postsqu):
        """Run one hourglass pass.

        Args:
            x: cost volume at 1/4 resolution with ``inplanes`` channels.
            presqu: optional skip at 1/8 resolution added before the conv5 ReLU.
            postsqu: optional skip at 1/8 resolution added before the conv2 ReLU.

        Returns:
            (out, pre, post): decoded volume at input resolution/channels, plus
            the two 1/8-resolution intermediates for the next hourglass.
        """
        out = self.conv1(x)      # 1/4 -> 1/8, channels doubled
        pre = self.conv2(out)    # 1/8, channels unchanged

        # Fuse the skip from a sibling hourglass (if supplied) before activating.
        fused = pre if postsqu is None else pre + postsqu
        pre = F.relu(fused, inplace=True)

        out = self.conv4(self.conv3(pre))  # 1/8 -> 1/16, then refine

        # Decode one level and add the matching-scale skip: an earlier
        # hourglass's presqu when available, otherwise this module's own pre.
        shortcut = pre if presqu is None else presqu
        post = F.relu(self.conv5(out) + shortcut, inplace=True)  # 1/16 -> 1/8

        out = self.conv6(post)  # 1/8 -> 1/4, channels back to inplanes

        return out, pre, post

class PSMNet(nn.Module):
    """PSMNet stereo matching network (stacked-hourglass variant).

    Extracts shared 2D features from the left/right images, builds a
    concatenation-based 4D cost volume, regularizes it with three stacked
    hourglass 3D-conv modules, and regresses disparity with soft-argmin.
    In training mode all three intermediate predictions are returned for
    deep supervision; in eval mode only the final prediction is returned.

    Args:
        maxdisp: maximum disparity at full image resolution.
    """

    def __init__(self, maxdisp):
        super(PSMNet, self).__init__()
        self.maxdisp = maxdisp

        # Shared-weight 2D feature extractor: 32 channels at 1/4 resolution.
        self.feature_extraction = feature_extraction()

        # Initial cost-volume regularization: 64 -> 32 channels, size unchanged.
        self.dres0 = nn.Sequential(
            convbn_3d(64, 32, 3, 1, 1),
            nn.ReLU(inplace=True),
            convbn_3d(32, 32, 3, 1, 1),
            nn.ReLU(inplace=True))

        # Residual 3D-conv block (identity added in forward).
        self.dres1 = nn.Sequential(
            convbn_3d(32, 32, 3, 1, 1),
            nn.ReLU(inplace=True),
            convbn_3d(32, 32, 3, 1, 1))

        # Three stacked hourglass modules; each keeps 32 channels and 1/4 size,
        # exchanging pre/post skip tensors for intermediate supervision.
        self.dres2 = hourglass(32)
        self.dres3 = hourglass(32)
        self.dres4 = hourglass(32)

        # One classification head per hourglass output: 32 -> 1 channel cost.
        self.classif1 = nn.Sequential(
            convbn_3d(32, 32, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))

        self.classif2 = nn.Sequential(
            convbn_3d(32, 32, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))

        self.classif3 = nn.Sequential(
            convbn_3d(32, 32, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))

        # He-style weight initialization for convs; BN to identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.Conv3d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.kernel_size[2] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm3d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                # Matches the reference implementation: only the bias is reset.
                m.bias.data.zero_()

    def forward(self, left, right):
        """Predict disparity from a rectified stereo pair.

        Args:
            left: left image tensor, (B, 3, H, W).
            right: right image tensor, (B, 3, H, W).

        Returns:
            Training: (pred1, pred2, pred3), each (B, H, W).
            Eval: pred3 only, (B, H, W).
        """
        refimg_fea = self.feature_extraction(left)      # (B, 32, H/4, W/4)
        targetimg_fea = self.feature_extraction(right)  # shared weights with left

        # Concatenation cost volume: (B, 64, maxdisp/4, H/4, W/4).
        # Built on the features' own device/dtype instead of the previous
        # hard-coded Variable(...).cuda(), which broke CPU/multi-device use.
        cost = torch.zeros(refimg_fea.size(0),
                           refimg_fea.size(1) * 2,
                           self.maxdisp // 4,
                           refimg_fea.size(2),
                           refimg_fea.size(3),
                           device=refimg_fea.device,
                           dtype=refimg_fea.dtype)

        for i in range(self.maxdisp // 4):  # one slice per disparity level
            if i > 0:
                # Left features cropped, right features shifted by i columns.
                cost[:, :refimg_fea.size(1), i, :, i:] = refimg_fea[:, :, :, i:]
                cost[:, refimg_fea.size(1):, i, :, i:] = targetimg_fea[:, :, :, :-i]
            else:
                cost[:, :refimg_fea.size(1), i, :, :] = refimg_fea
                cost[:, refimg_fea.size(1):, i, :, :] = targetimg_fea
        cost = cost.contiguous()

        cost0 = self.dres0(cost)            # 64 -> 32 channels
        cost0 = self.dres1(cost0) + cost0   # residual refinement

        out1, pre1, post1 = self.dres2(cost0, None, None)
        out1 = out1 + cost0  # residual connection to cost0

        out2, pre2, post2 = self.dres3(out1, pre1, post1)
        out2 = out2 + cost0

        # NOTE(review): the third hourglass reuses pre1 (not pre2); this
        # matches the reference implementation, so it is kept as-is.
        out3, pre3, post3 = self.dres4(out2, pre1, post2)
        out3 = out3 + cost0

        cost1 = self.classif1(out1)          # (B, 1, D/4, H/4, W/4)
        cost2 = self.classif2(out2) + cost1  # cumulative residual costs
        cost3 = self.classif3(out3) + cost2

        if self.training:
            # Intermediate predictions only needed for the training losses.
            cost1 = F.interpolate(cost1, [self.maxdisp, left.size(2), left.size(3)], mode='trilinear')
            cost2 = F.interpolate(cost2, [self.maxdisp, left.size(2), left.size(3)], mode='trilinear')

            cost1 = torch.squeeze(cost1, 1)            # (B, D, H, W)
            pred1 = F.softmax(cost1, dim=1)            # probability volume over disparity
            pred1 = disparityregression(self.maxdisp)(pred1)  # soft-argmin -> (B, H, W)

            cost2 = torch.squeeze(cost2, 1)
            pred2 = F.softmax(cost2, dim=1)
            pred2 = disparityregression(self.maxdisp)(pred2)

        cost3 = F.interpolate(cost3, [self.maxdisp, left.size(2), left.size(3)], mode='trilinear')
        cost3 = torch.squeeze(cost3, 1)                # (B, D, H, W)
        pred3 = F.softmax(cost3, dim=1)
        # For your information: 'softmax(c)' learns "similarity" while
        # 'softmax(-c)' learns "matching cost" as in the paper; the sign does
        # not affect performance because the feature-based cost volume is
        # flexible enough either way.
        pred3 = disparityregression(self.maxdisp)(pred3)  # soft-argmin -> (B, H, W)

        if self.training:
            return pred1, pred2, pred3
        else:
            return pred3
