import torch
import torch.nn as nn
import torch.nn.functional as F
import network.resnet as resnet
import torchvision.transforms as transforms
from collections import OrderedDict

class ResNetBasicBlock(nn.Module):
    """ResNet basic block: two 3x3 conv-BN stages plus a residual shortcut.

    Fixes over the previous version:
      * adds the residual (skip) connection the block's name implies —
        without it this was just a plain conv stack, not a ResNet block;
      * applies ``stride`` only in the first conv. The old code passed
        ``stride`` to both convs, so a stride-2 block downsampled by 4x
        instead of the conventional 2x.

    Args:
        in_channels:  channels of the input feature map.
        out_channels: channels produced by both convs.
        stride:       spatial stride of the first conv (1 keeps resolution).
    """

    def __init__(self, in_channels, out_channels, stride):
        super(ResNetBasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.reLu1 = nn.ReLU()
        # Second conv always keeps resolution (stride 1, ResNet convention).
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.reLu2 = nn.ReLU()
        # Shortcut branch: identity when the shape is unchanged, otherwise a
        # 1x1 projection so the residual add is shape-compatible.
        if stride != 1 or in_channels != out_channels:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride),
                nn.BatchNorm2d(out_channels),
            )
        else:
            self.shortcut = nn.Identity()

    def forward(self, x):
        """Return ``relu(bn2(conv2(relu(bn1(conv1(x))))) + shortcut(x))``."""
        output = self.conv1(x)
        output = self.reLu1(self.bn1(output))
        output = self.conv2(output)
        output = self.bn2(output)
        # Residual add before the final activation (He et al., 2015).
        return self.reLu2(output + self.shortcut(x))

# Non-downsampling network based on multi-scale weight-shared attention
class NDSNet(nn.Module):
    """Non-downsampling network: a weight-shared backbone run at five scales.

    The input is rescaled to 256/128/64/32 (the full-resolution copy is kept
    as-is), every scale is pushed through the same conv stem + backbone, the
    coarse feature maps are upsampled back to 512x512, and the five maps are
    fused by element-wise summation before a final 3x3 projection to
    ``outChannel`` channels.

    NOTE(review): the full-resolution branch is never resized yet is summed
    with branches upsampled to 512x512 — this assumes the input is already
    512x512; confirm against the callers.
    """

    def __init__(self, inChannel, outChannel):
        super(NDSNet, self).__init__()
        bn_momentum = 0.1

        # One fixed Resize transform per pyramid level.
        self.resizeTo512 = transforms.Resize([512, 512])
        self.resizeTo256 = transforms.Resize([256, 256])
        self.resizeTo128 = transforms.Resize([128, 128])
        self.resizeTo64 = transforms.Resize([64, 64])
        self.resizeTo32 = transforms.Resize([32, 32])

        # Input stem: project the image to the backbone's 32 channels.
        self.conv1 = nn.Conv2d(inChannel, 32, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm2d(32, momentum=bn_momentum, affine=True)
        self.reLu1 = nn.ReLU(inplace=True)

        # Backbone shared (same weights) by every pyramid level.
        self.backbone = nn.Sequential(
            ResNetBasicBlock(32, 32, 1),
            ResNetBasicBlock(32, 32, 1),
            ResNetBasicBlock(32, 32, 1),
            ResNetBasicBlock(32, 32, 1),
            ResNetBasicBlock(32, 32, 1),
        )

        # NOTE(review): conv2/bn2/reLu2 belong to a disabled concat-fusion
        # head (5 scales * 32 = 160 input channels). forward() never uses
        # them, but they stay registered so the parameter set / state_dict
        # of the module is unchanged.
        self.conv2 = nn.Conv2d(160, 32, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm2d(32, momentum=bn_momentum, affine=True)
        self.reLu2 = nn.ReLU(inplace=True)

        # Output projection to the requested channel count.
        self.conv3 = nn.Conv2d(32, outChannel, kernel_size=3, padding=1)

    def forward(self, x):
        """Run the multi-scale pyramid and return the fused prediction map."""
        # Build the image pyramid; the first entry keeps full resolution.
        pyramid = [
            x,
            self.resizeTo256(x),
            self.resizeTo128(x),
            self.resizeTo64(x),
            self.resizeTo32(x),
        ]

        # Every level goes through the same stem and backbone; only the
        # final backbone output of each level is kept.
        features = [
            self.backbone(self.reLu1(self.bn1(self.conv1(level))))
            for level in pyramid
        ]

        # Upsample the coarse maps to 512x512 and fuse by summation
        # (element-wise add, not channel concatenation).
        fused = features[0]
        for feat in features[1:]:
            fused = fused + self.resizeTo512(feat)

        return self.conv3(fused)
