import torch
import torch.nn as nn
import torch.nn.functional as F
import network.resnet as resnet
import torchvision.transforms as transforms
from collections import OrderedDict



class block(nn.Module):
    """Conv -> BatchNorm -> ReLU building block.

    With the default kernel_size=3 / padding=1 the spatial size of the
    input is preserved; only the channel count changes.
    """

    def __init__(self, inChannel, outChannel, kernel_size=3, padding=1):
        super(block, self).__init__()
        # Attribute names (conv / bn / reLu) are kept stable so existing
        # checkpoints keyed on them continue to load.
        self.conv = nn.Conv2d(inChannel, outChannel, kernel_size=kernel_size, padding=padding)
        self.bn = nn.BatchNorm2d(outChannel, momentum=0.1, affine=True)
        self.reLu = nn.ReLU(inplace=False)

    def forward(self, x):
        # conv -> batch-norm -> activation, expressed as a single pipeline.
        return self.reLu(self.bn(self.conv(x)))

class patchNet(nn.Module):
    def __init__(self,inChannel,outChannel):
        super(patchNet, self).__init__()
        #将图像裁剪成不同的大小
        self.resizeTo512 = transforms.Resize([512, 512])
        self.resizeTo256=transforms.Resize([256,256])
        self.resizeTo128 = transforms.Resize([128, 128])
        self.resizeTo64 = transforms.Resize([64, 64])
        self.resizeTo32 = transforms.Resize([32, 32])
        #输入层
        self.head=block(3,32)
        #backbone
        # self.block1=block(32,32)
        # self.block2 = block(32, 32)
        # self.block3 = block(32, 32)
        # self.block4 = block(32, 32)
        # self.block5 = block(32, 32)
        # self.block6 = block(32, 32)
        # self.block7 = block(32, 32)
        # self.block8 = block(32, 32)
        # self.block9 = block(32, 32)
        # self.block10 = block(32, 32)
        self.backbone1=nn.Sequential(block(32,32),
                                    block(32, 32),
                                    block(32, 32),
                                    block(32, 32),
                                    block(32, 32),
                                    block(32, 32),
                                    block(32, 32)
                                    )
        self.backbone2 = nn.Sequential(block(32, 32),
                                       block(32, 32),
                                       block(32, 32),
                                       block(32, 32),
                                       block(32, 32),
                                       block(32, 32),
                                       block(32, 32)
                                       )
        self.backbone3 = nn.Sequential(block(32, 32),
                                       block(32, 32),
                                       block(32, 32),
                                       block(32, 32),
                                       block(32, 32),
                                       block(32, 32),
                                       block(32, 32)
                                       )
        #输出层
        self.tail=block(160,32,kernel_size=1,padding=0)

        #卷积成分类数
        self.out = nn.Conv2d(32, outChannel, kernel_size=1,padding=0)


    def forward(self, input):

#多尺度权重共享的self attention--------------------------------------

        #对输入图片进行尺寸绽放
        patch512=input
        patch256=self.resizeTo256(input)
        patch128=self.resizeTo128(input)
        patch64=self.resizeTo64(input)
        patch32=self.resizeTo32(input)

        #经过权重共享的backbone
        xs=[]
        for x in [patch64,patch32]:
            # 卷积到32通道
            x=self.head(x)

            #通过backbone
            x=self.backbone1(x)
            # x = self.block1(x)
            # x = self.block2(x)
            # x = self.block3(x)
            # x = self.block4(x)
            # x = self.block5(x)

            xs.append(x)
        patch64,patch32=xs
        xs = []
        for x in [patch256, patch128]:
            # 卷积到32通道
            x = self.head(x)

            # 通过backbone
            x = self.backbone2(x)
            # x = self.block1(x)
            # x = self.block2(x)
            # x = self.block3(x)
            # x = self.block4(x)
            # x = self.block5(x)

            xs.append(x)
        patch256, patch128 = xs


        patch512 = self.head(patch512)
        # 通过backbone
        patch512 = self.backbone3(patch512)

        #上采样
        patch256=self.resizeTo512(patch256)
        patch128 = self.resizeTo512(patch128)
        patch64 = self.resizeTo512(patch64)
        patch32 = self.resizeTo512(patch32)
        # attention

        #拼在一起
        # x=patch512+patch32
        x=torch.cat([patch512,patch256,patch128,patch64,patch32],dim=1)
        x=self.tail(x)
        x=self.out(x)
        #输出


        return x
