import torch
import torch.nn as nn
import torch.nn.functional as F
import network.resnet as resnet
import torchvision.transforms as transforms
from collections import OrderedDict



class block(nn.Module):
    """Basic Conv-BN-ReLU unit: 3x3 convolution (padding 1, so spatial size
    is preserved), batch normalization, then an in-place ReLU."""

    def __init__(self, inChannel, outChannel):
        super(block, self).__init__()
        # Attribute names are kept stable so existing checkpoints
        # (state_dict keys) remain loadable.
        self.conv = nn.Conv2d(inChannel, outChannel, kernel_size=3, padding=1)
        self.bn = nn.BatchNorm2d(outChannel, momentum=0.1, affine=True)
        self.reLu = nn.ReLU(inplace=True)

    def forward(self, x):
        # conv -> batch-norm -> relu, as a single expression.
        return self.reLu(self.bn(self.conv(x)))

class patchNet(nn.Module):
    """Multi-scale patch segmentation network.

    The input image (assumed 512x512) is resized to five scales
    (512/256/128/64/32). Every scale goes through a weight-shared stem and
    backbone, with larger scales traversing more backbone blocks. All five
    feature maps are then upsampled back to 512x512, concatenated along the
    channel dimension (5 * 32 = 160 channels), fused, and projected to
    `outChannel` class maps.

    Args:
        inChannel: number of input image channels (previously hard-coded
            to 3; now honoured, backward compatible for 3-channel input).
        outChannel: number of output classes / channels.
    """

    def __init__(self, inChannel, outChannel):
        super(patchNet, self).__init__()

        # Resize transforms for the five processing scales.
        self.resizeTo512 = transforms.Resize([512, 512])
        self.resizeTo256 = transforms.Resize([256, 256])
        self.resizeTo128 = transforms.Resize([128, 128])
        self.resizeTo64 = transforms.Resize([64, 64])
        self.resizeTo32 = transforms.Resize([32, 32])

        # Stem: project the input image to 32 feature channels.
        # Generalized from the hard-coded 3 so `inChannel` is actually used.
        self.head = block(inChannel, 32)

        # Weight-shared backbone blocks; deeper scales reuse the same weights.
        self.block1 = block(32, 32)
        self.block2 = block(32, 32)
        self.block3 = block(32, 32)
        self.block4 = block(32, 32)
        self.block5 = block(32, 32)
        self.block6 = block(32, 32)
        self.block7 = block(32, 32)

        # Fuse the concatenated multi-scale features (5 * 32 = 160) to 32.
        self.tail = block(160, 32)

        # Final 3x3 projection to the number of output classes.
        self.out = nn.Conv2d(32, outChannel, kernel_size=3, padding=1)

    def forward(self, x):
        # Build the five-scale pyramid from the input (assumed 512x512
        # — TODO confirm against the caller; `patch512` is used unresized).
        patch512 = x
        patch256 = self.resizeTo256(x)
        patch128 = self.resizeTo128(x)
        patch64 = self.resizeTo64(x)
        patch32 = self.resizeTo32(x)

        # Shared stem + first three backbone blocks for every scale.
        feats = []
        for patch in (patch512, patch256, patch128, patch64, patch32):
            patch = self.head(patch)
            patch = self.block1(patch)
            patch = self.block2(patch)
            patch = self.block3(patch)
            feats.append(patch)
        patch512, patch256, patch128, patch64, patch32 = feats

        # Larger scales get extra depth from the shared blocks.
        patch512 = self.block4(patch512)
        patch512 = self.block5(patch512)
        patch512 = self.block6(patch512)
        patch512 = self.block7(patch512)

        patch256 = self.block4(patch256)
        patch256 = self.block5(patch256)
        patch256 = self.block6(patch256)

        patch128 = self.block4(patch128)
        patch128 = self.block5(patch128)

        # BUG FIX: was `self.block4(patch256)`, which silently discarded the
        # 64x64 branch and fed the 256x256 features through block4 twice.
        patch64 = self.block4(patch64)

        # Upsample every branch back to the output resolution.
        patch256 = self.resizeTo512(patch256)
        patch128 = self.resizeTo512(patch128)
        patch64 = self.resizeTo512(patch64)
        patch32 = self.resizeTo512(patch32)

        # Concatenate along channels and fuse.
        x = torch.cat([patch512, patch256, patch128, patch64, patch32], dim=1)
        x = self.tail(x)

        # Project to per-class output maps.
        x = self.out(x)

        return x
