import math
from collections import OrderedDict
import torch
import torch.nn as nn
import torchvision.transforms as tt

from ddf_module import DDF1, DDF2, DDF3, DDF4

class netStructure(nn.Module):
    """Encoder-decoder network built around four DDF stages.

    Encoder: a 7x7 stride-2 conv plus a 3x3 stride-2 max-pool, followed by
    the project-local DDF1..DDF4 modules. Decoder: 1x1 convs project the
    DDF4/DDF2/DDF1 outputs down to 3 channels; transposed convolutions
    upsample them, fusing the earlier-stage projections via center-crop
    and element-wise add (FCN-style skip connections); the result is
    center-cropped to the 227x227 output resolution.

    Input is assumed to be a 3-channel image, resized to 227x227 before
    entering the network (the original 4-channel RGB-D variant was
    shelved; depth handling is a TODO).
    """

    def __init__(self):
        super().__init__()

        # --- encoder ---
        # conv1: 3 -> 64 channels, 7x7 kernel, stride 2, no padding
        # (227 -> 111 spatial).
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=0, bias=False)

        # 3x3 stride-2 max-pool after conv1.
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2)

        # DDF stages; the integer argument is a per-stage size parameter
        # whose semantics live in ddf_module.
        self.ddf1 = DDF1(8)
        self.ddf2 = DDF2(8)
        self.ddf3 = DDF3(4)
        self.ddf4 = DDF4(4)

        # --- decoder ---
        # 1x1 convs projecting the DDF4 / DDF2 / DDF1 feature maps to
        # 3 channels. The input channel counts (1180, 496, 224) must
        # match the corresponding DDF outputs -- verify against
        # ddf_module if those modules change.
        self.conv2 = nn.Conv2d(1180, 3, kernel_size=1, stride=1, padding=0, bias=False)
        self.conv3 = nn.Conv2d(496, 3, kernel_size=1, stride=1, padding=0, bias=False)
        self.conv4 = nn.Conv2d(224, 3, kernel_size=1, stride=1, padding=0, bias=False)

        # Transposed convolutions for upsampling between fusion steps.
        # NOTE(review): deconv3's kernel_size=216 is unusually large for
        # a transposed conv -- presumably tuned to reach 227+ spatial
        # size in one step; confirm against the printed shapes.
        self.deconv1 = nn.ConvTranspose2d(3, 3, kernel_size=6, stride=2, padding=1)
        self.deconv2 = nn.ConvTranspose2d(3, 3, kernel_size=6, stride=2, padding=1)
        self.deconv3 = nn.ConvTranspose2d(3, 3, kernel_size=216, stride=2, padding=1)

        # Center crops that align the skip-connection tensors with the
        # upsampled decoder tensors before element-wise addition, and the
        # final crop to the 227x227 output.
        self.crop1 = tt.CenterCrop((16, 16))
        self.crop2 = tt.CenterCrop((34, 34))
        self.crop3 = tt.CenterCrop((227, 227))

        # Weight initialization, applied over every submodule:
        #   * Conv2d: zero-mean normal with std = sqrt(2 / n) where
        #     n = kH * kW * out_channels (He/Kaiming fan-out style).
        #   * BatchNorm2d: weight = 1, bias = 0.
        # The trailing underscore on these tensor methods means in-place.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, x):
        """Run the encoder-decoder pass and return the cropped output.

        The intermediate ``print`` calls are debug shape traces kept from
        the original; autograd derives backward from this forward alone.
        """
        # --- encoder ---
        x = self.conv1(x)
        print("conv1", x.size())

        x = self.maxpool(x)
        print("maxpool", x.size())

        # x1 and x2 are kept alive for the decoder skip connections.
        x1 = self.ddf1(x)
        print("ddf1", x1.size())

        x2 = self.ddf2(x1)
        print("ddf2", x2.size())

        x = self.ddf3(x2)
        print("ddf3", x.size())

        x = self.ddf4(x)
        print("ddf4", x.size())

        # --- decoder ---
        x = self.conv2(x)
        print("conv2", x.size())

        x = self.deconv1(x)
        print("deconv1", x.size())

        # Skip connection from DDF2: project, crop to match, then add
        # (torch.add requires tensor operands, hence no module wrapper).
        x2_out = self.conv3(x2)
        print("conv3", x2_out.size())

        x2_out = self.crop1(x2_out)
        print("crop1", x2_out.size())

        x = torch.add(x2_out, x)
        print("ele_add1", x.size())

        x = self.deconv2(x)
        print("deconv2", x.size())

        # Skip connection from DDF1: same project / crop / add pattern.
        x1_out = self.conv4(x1)
        print("conv4", x1_out.size())

        x1_out = self.crop2(x1_out)
        print("crop2", x1_out.size())

        x = torch.add(x1_out, x)
        print("ele_add2", x.size())

        x = self.deconv3(x)
        print("deconv3", x.size())

        # Final crop to the 227x227 output resolution.
        output = self.crop3(x)
        print("crop3", output.size())

        return output



