from __future__ import print_function
import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.functional as F
import numpy as np

# 2D convolution followed by batch normalization
def convbn(in_planes, out_planes, kernel_size, stride, pad, dilation):
    """Bias-free Conv2d followed by BatchNorm2d (no activation).

    When ``dilation > 1`` the padding is set to ``dilation`` (instead of
    ``pad``) so a 3x3 dilated conv keeps the same spatial size as the
    undilated one.
    """
    effective_pad = dilation if dilation > 1 else pad
    conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                     stride=stride, padding=effective_pad,
                     dilation=dilation, bias=False)
    return nn.Sequential(conv, nn.BatchNorm2d(out_planes))

# 3D convolution followed by batch normalization
def convbn_3d(in_planes, out_planes, kernel_size, stride, pad):
    """Bias-free Conv3d followed by BatchNorm3d (no activation)."""
    layers = [
        nn.Conv3d(in_planes, out_planes, kernel_size=kernel_size,
                  padding=pad, stride=stride, bias=False),
        nn.BatchNorm3d(out_planes),
    ]
    return nn.Sequential(*layers)

# Residual block
class BasicBlock(nn.Module):
    """Residual block: conv-bn-relu, conv-bn, then skip addition.

    Unlike the canonical ResNet block there is deliberately no ReLU after
    the residual addition. ``downsample`` (may be None) is a projection
    applied to the skip path when stride/channels change.
    """
    expansion = 1  # output channels = planes * expansion

    def __init__(self, inplanes, planes, stride, downsample, pad, dilation):
        super(BasicBlock, self).__init__()
        # First conv may stride/dilate; second keeps size and channels.
        self.conv1 = nn.Sequential(
            convbn(inplanes, planes, 3, stride, pad, dilation),
            nn.ReLU(inplace=True))
        self.conv2 = convbn(planes, planes, 3, 1, pad, dilation)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x
        out = self.conv2(self.conv1(x))
        if self.downsample is not None:
            # Project the skip path to match out's shape.
            identity = self.downsample(x)
        out += identity  # residual connection (no trailing ReLU)
        return out

# Disparity regression (soft argmin)
class disparityregression(nn.Module):
    """Soft-argmin disparity regression.

    Given a probability volume ``x`` of shape (B, D, H, W) — typically the
    softmax over the cost volume's disparity dimension — returns the
    expected disparity sum_d d * p(d) with shape (B, 1, H, W).

    Fix: the disparity-index tensor was previously created with a
    hard-coded ``.cuda()`` call, which crashed on CPU-only machines; it is
    now kept on CPU and moved to the input's device/dtype in forward().
    """

    def __init__(self, maxdisp):
        super(disparityregression, self).__init__()
        # (1, D, 1, 1) tensor of disparity indices 0..maxdisp-1 that
        # broadcasts against the (B, D, H, W) probability volume.
        self.disp = torch.arange(maxdisp, dtype=torch.float32).view(1, maxdisp, 1, 1)

    def forward(self, x):
        # Move the index tensor to wherever the input lives (CPU or GPU).
        disp = self.disp.to(device=x.device, dtype=x.dtype)
        # Expected disparity over the D dimension; keepdim -> (B, 1, H, W).
        out = torch.sum(x * disp, 1, keepdim=True)
        return out

# Feature extraction
class feature_extraction(nn.Module):
    """CNN backbone + spatial pyramid pooling (SPP) feature extractor.

    Input:  (B, 3, H, W) image.
    Output: (B, 32, H/4, W/4) feature map (residual features concatenated
    with four pooled pyramid scales, fused by `lastconv`).
    """
    def __init__(self):
        super(feature_extraction, self).__init__()
        self.inplanes = 32 # running input-channel count, mutated by _make_layer
        # Stem: three 3x3 convs; the first has stride 2 -> spatial size H/2 x W/2
        self.firstconv = nn.Sequential(
            convbn(3, 32, 3, 2, 1, 1), # conv+bn, 3 -> 32 channels, stride 2
            nn.ReLU(inplace=True),
            convbn(32, 32, 3, 1, 1, 1), # conv+bn, channels and size unchanged
            nn.ReLU(inplace=True),
            convbn(32, 32, 3, 1, 1, 1), # conv+bn, channels and size unchanged
            nn.ReLU(inplace=True))

        self.layer1 = self._make_layer(BasicBlock, 32, 3, 1, 1, 1) # 3 residual blocks, 32 -> 32, stride 1, no dilation
        self.layer2 = self._make_layer(BasicBlock, 64, 16, 2, 1, 1) # 16 residual blocks, 32 -> 64, stride 2 -> H/4 x W/4
        self.layer3 = self._make_layer(BasicBlock, 128, 3, 1, 1, 1) # 3 residual blocks, 64 -> 128, size unchanged (paper uses dilation 2 here)
        self.layer4 = self._make_layer(BasicBlock, 128, 3, 1, 1, 2) # 3 residual blocks, 128 -> 128, dilation 2 (paper uses dilation 4)

        # SPP branch: average-pool with a 64x64 window, then 1x1 conv 128 -> 32
        self.branch1 = nn.Sequential(
            nn.AvgPool2d((64, 64), stride=(64, 64)), # average pooling, size / 64
            convbn(128, 32, 1, 1, 0, 1), # 1x1 conv+bn, 128 -> 32 channels
            nn.ReLU(inplace=True))

        # SPP branch: 32x32 pooling window
        self.branch2 = nn.Sequential(
            nn.AvgPool2d((32, 32), stride=(32, 32)), # average pooling, size / 32
            convbn(128, 32, 1, 1, 0, 1), # 1x1 conv+bn, 128 -> 32 channels
            nn.ReLU(inplace=True))

        # SPP branch: 16x16 pooling window
        self.branch3 = nn.Sequential(
            nn.AvgPool2d((16, 16), stride=(16, 16)), # average pooling, size / 16
            convbn(128, 32, 1, 1, 0, 1), # 1x1 conv+bn, 128 -> 32 channels
            nn.ReLU(inplace=True))

        # SPP branch: 8x8 pooling window
        self.branch4 = nn.Sequential(
            nn.AvgPool2d((8, 8), stride=(8, 8)), # average pooling, size / 8
            convbn(128, 32, 1, 1, 0, 1), # 1x1 conv+bn, 128 -> 32 channels
            nn.ReLU(inplace=True))

        # Fusion of the concatenated 320-channel feature (64 + 128 + 4*32)
        self.lastconv = nn.Sequential(
            convbn(320, 128, 3, 1, 1, 1), # conv+bn, 320 -> 128 channels
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 32, kernel_size=1, padding=0, stride=1, bias=False)) # 1x1 conv, 128 -> 32 channels, no bias

    # Build a stack of residual blocks (standard ResNet-style layer builder)
    def _make_layer(self, block, planes, blocks, stride, pad, dilation):
        """Stack `blocks` residual blocks; only the first may stride/project.

        Side effect: updates self.inplanes to the layer's output channels.
        """
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion: # skip path needs a projection
            downsample = nn.Sequential( # 1x1 conv projection for the skip connection
                nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), # match channels and stride
                nn.BatchNorm2d(planes * block.expansion),) # batch norm on the projection

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, pad, dilation)) # first block carries the stride/projection
        self.inplanes = planes * block.expansion # subsequent blocks take the layer's output channels
        for i in range(1, blocks): # remaining blocks-1 blocks: stride 1, no projection
            layers.append(block(self.inplanes, planes, 1, None, pad, dilation))

        return nn.Sequential(*layers) # unpack the block list into a Sequential

    def forward(self, x):
        # Residual feature extraction
        output = self.firstconv(x) # 3 -> 32 channels, 1/2 input size
        output = self.layer1(output) # 32 -> 32 channels, size unchanged
        output_raw = self.layer2(output) # 32 -> 64 channels, 1/4 input size
        output = self.layer3(output_raw) # 64 -> 128 channels, size unchanged
        output_skip = self.layer4(output) # 128 -> 128 channels, dilated, size unchanged

        # Spatial pyramid pooling (SPP) module; each branch is pooled,
        # reduced to 32 channels, then upsampled back to output_skip's size.
        # NOTE(review): F.interpolate is called without align_corners, so the
        # default (align_corners=False) applies on modern PyTorch.
        output_branch1 = self.branch1(output_skip) # coarsest pooled scale, 128 -> 32 channels
        output_branch1 = F.interpolate(output_branch1, (output_skip.size()[2], output_skip.size()[3]), mode='bilinear') # bilinear upsample to 1/4 input size

        output_branch2 = self.branch2(output_skip) # 32x32-pooled scale, 128 -> 32 channels
        output_branch2 = F.interpolate(output_branch2, (output_skip.size()[2], output_skip.size()[3]), mode='bilinear') # bilinear upsample to 1/4 input size

        output_branch3 = self.branch3(output_skip) # 16x16-pooled scale, 128 -> 32 channels
        output_branch3 = F.interpolate(output_branch3, (output_skip.size()[2], output_skip.size()[3]), mode='bilinear') # bilinear upsample to 1/4 input size

        output_branch4 = self.branch4(output_skip) # finest pooled scale, 128 -> 32 channels
        output_branch4 = F.interpolate(output_branch4, (output_skip.size()[2], output_skip.size()[3]), mode='bilinear') # bilinear upsample to 1/4 input size

        output_feature = torch.cat((output_raw, output_skip, output_branch4, output_branch3, output_branch2, output_branch1), 1) # concat on channel dim: 64 + 128 + 4*32 = 320
        output_feature = self.lastconv(output_feature) # fuse: 320 -> 32 channels, 1/4 input size

        return output_feature # final (B, 32, H/4, W/4) feature map
