import torch
from torch import nn

from torch.nn import Conv2d, BatchNorm2d, ReLU, MaxPool2d, Sequential, AdaptiveAvgPool2d, Linear, Flatten


# Residual block used by ResNet-18 / ResNet-34.
class BasicBlock(nn.Module):
    """Two 3x3 convolutions plus a shortcut connection.

    The first convolution carries any spatial downsampling (stride > 1).
    When the input and the main-branch output would differ in shape, the
    shortcut is projected with a 1x1 convolution so the two can be added.
    """

    # The basic variant does not widen channels inside the block.
    expansion = 1

    def __init__(self, in_channels, out_channels, stride=1):
        super(BasicBlock, self).__init__()
        # First 3x3 convolution; handles the (optional) downsampling.
        self.conv1 = Conv2d(in_channels, out_channels, kernel_size=3,
                            stride=stride, padding=1, bias=False)
        self.bn1 = BatchNorm2d(out_channels)
        self.relu = ReLU()
        # Second 3x3 convolution, always stride 1.
        self.conv2 = Conv2d(out_channels, out_channels, kernel_size=3,
                            stride=1, padding=1, bias=False)
        self.bn2 = BatchNorm2d(out_channels)

        # 1x1 projection so identity and main branch share a shape.
        needs_projection = stride != 1 or in_channels != out_channels
        self.downsample = Sequential(
            Conv2d(in_channels, out_channels, kernel_size=1,
                   stride=stride, bias=False),
            BatchNorm2d(out_channels),
        ) if needs_projection else None

    def forward(self, x):
        # Main branch: conv -> bn -> relu -> conv -> bn.
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))

        # Shortcut branch, projected only when shapes would mismatch.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = out + shortcut

        return self.relu(out)


# ResNet-18 adapted for CIFAR-size inputs: the stem is a 3x3/stride-1
# convolution (and the ImageNet stem's max-pool is dropped) instead of
# the original 7x7/stride-2 convolution.
class ResNet(nn.Module):
    """ResNet-18-shaped backbone producing ``num_classes`` logits.

    Args:
        block: residual block class (e.g. ``BasicBlock``); must accept
            ``(in_channels, out_channels, stride=...)`` and expose an
            ``expansion`` class attribute.
        num_classes: width of the final fully connected layer.
    """

    def __init__(self, block, num_classes=100):
        super(ResNet, self).__init__()
        self.in_channel = 64
        # Stem: 32x32x3 -> 32x32x64.
        self.conv1 = Conv2d(3, self.in_channel, kernel_size=3, stride=1,
                            padding=1, bias=False)
        self.bn1 = BatchNorm2d(self.in_channel)
        self.relu = ReLU(inplace=True)

        # conv2_x: 32x32x64 -> 32x32x64 (no max-pool for small inputs).
        # self.maxpool = MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.conv2_x = Sequential(
            block(in_channels=64, out_channels=64),
            block(in_channels=64, out_channels=64),
        )

        # conv3_x: 32x32x64 -> 16x16x128.
        self.conv3_x = Sequential(
            block(in_channels=64, out_channels=128, stride=2),
            block(in_channels=128, out_channels=128),
        )

        # conv4_x: 16x16x128 -> 8x8x256.
        self.conv4_x = Sequential(
            block(in_channels=128, out_channels=256, stride=2),
            block(in_channels=256, out_channels=256),
        )

        # conv5_x: 8x8x256 -> 4x4x512.
        self.conv5_x = Sequential(
            block(in_channels=256, out_channels=512, stride=2),
            block(in_channels=512, out_channels=512),
        )

        # Adaptive average pooling: 4x4x512 -> 1x1x512.
        self.avgpool = AdaptiveAvgPool2d((1, 1))

        # Classifier head; width follows the block's channel expansion.
        self.fc = Linear(512 * block.expansion, num_classes)

    def forward(self, x):
        output = self.conv1(x)
        output = self.bn1(output)
        output = self.relu(output)

        # output = self.maxpool(output)
        output = self.conv2_x(output)
        output = self.conv3_x(output)
        output = self.conv4_x(output)
        output = self.conv5_x(output)

        output = self.avgpool(output)
        # Flatten everything but the batch dimension. The previous
        # ``view(-1, 512)`` hard-coded the feature width, which disagrees
        # with ``512 * block.expansion`` used for ``fc`` and would silently
        # corrupt the batch dimension for blocks with expansion != 1.
        output = torch.flatten(output, 1)
        output = self.fc(output)

        return output


# Factory for the ResNet-18 configuration.
def resnet18(num_classes=100):
    """Build a CIFAR-style ResNet-18.

    Args:
        num_classes: width of the classifier head. Defaults to 100
            (matching the original hard-coded CIFAR-100 setup), so
            existing ``resnet18()`` callers are unaffected.
    """
    return ResNet(BasicBlock, num_classes=num_classes)


