import torch
import torchvision
from PIL import Image
import torch.nn.functional as F
from torch import nn

# Path of the test image (same preprocessing as the MLP-model script).
image_path = "D:\\Machine Learning\\learn_torch\\airplane.jpg"
# BUGFIX: force 3 channels. Some JPEGs decode as grayscale (1 channel) and
# PNGs may carry alpha (4 channels); either would break the (1, 3, 32, 32)
# batch reshape performed before inference below.
image = Image.open(image_path).convert("RGB")
# print(image)
# Resize to the 32x32 CIFAR-10 resolution the network expects, then convert
# the HWC uint8 image to a CHW float tensor scaled to [0, 1].
transform = torchvision.transforms.Compose([
    torchvision.transforms.Resize((32, 32)),
    torchvision.transforms.ToTensor(),
])
image = transform(image)  # shape: (3, 32, 32)
# print(image.shape)
image = image.cuda()  # the loaded model below runs on the GPU


class Residual(nn.Module):
    """Basic ResNet residual block: two 3x3 convs plus a skip connection.

    Args:
        in_channel:  number of input feature channels.
        out_channel: number of output feature channels.
        stride:      stride of the first conv; 2 halves the spatial size.
    """

    def __init__(self, in_channel, out_channel, stride):
        super(Residual, self).__init__()
        # Kept as an attribute: the forward pass uses it to decide whether the
        # skip path needs the 1x1 projection below.
        self.stride = stride
        # Main path: conv -> BN -> ReLU -> conv -> BN. The second ReLU is
        # deliberately applied only AFTER the skip connection is added.
        self.conv1 = nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=stride, padding=1)
        self.bn1 = nn.BatchNorm2d(out_channel)  # BN reduces vanishing/exploding gradients
        self.relu1 = nn.ReLU()
        self.conv2 = nn.Conv2d(out_channel, out_channel, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(out_channel)
        self.relu2 = nn.ReLU()

        # 1x1 projection for the skip path ("dashed" shortcut): matches the
        # shortcut's channel count and spatial size to the main path's output.
        self.conv3 = nn.Conv2d(in_channel, out_channel, kernel_size=1, stride=stride)

    def forward(self, x):
        """Return relu(F(x) + shortcut(x)), with the shortcut projected as needed."""
        out = self.relu1(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))  # NOTE: no ReLU yet -- activate after the addition
        # BUGFIX: the original gated the projection on stride != 1 only, so a
        # block with stride 1 but differing channel counts crashed on `out + x`.
        # Checking the runtime channel mismatch as well fixes that, and keeps
        # previously pickled instances working (no new attribute is required).
        if self.stride != 1 or x.size(1) != out.size(1):
            x = self.conv3(x)
        out = self.relu2(out + x)  # add the shortcut, then activate
        return out


class RESNET(nn.Module):
    """ResNet-18-style classifier for 32x32 RGB images (e.g. CIFAR-10).

    Args:
        num_classes: number of output classes (default 10).
    """

    def __init__(self, num_classes=10):
        super(RESNET, self).__init__()
        # Stem: one conv before the residual stages.
        # BUGFIX: the script feeds a 3-channel RGB tensor of shape
        # (1, 3, 32, 32), so the stem must accept 3 input channels, not 1.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=1)
        self.maxpool1 = nn.MaxPool2d(3, stride=2, padding=1)  # halve spatial size

        # Eight residual blocks; stride 2 whenever the channel count doubles,
        # so the spatial size halves as the feature depth grows.
        self.resblock1 = Residual(64, 64, 1)
        self.resblock2 = Residual(64, 64, 1)
        self.resblock3 = Residual(64, 128, 2)
        self.resblock4 = Residual(128, 128, 1)
        self.resblock5 = Residual(128, 256, 2)
        self.resblock6 = Residual(256, 256, 1)
        self.resblock7 = Residual(256, 512, 2)
        self.resblock8 = Residual(512, 512, 1)

        # Global average pooling: whatever the spatial size, reduce each of the
        # 512 feature maps to a single value -> (N, 512, 1, 1).
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.flatten = nn.Flatten()  # (N, 512, 1, 1) -> (N, 512)
        self.fc = nn.Linear(512, num_classes)  # final linear classifier

    def forward(self, x):
        """Map a (N, 3, H, W) image batch to (N, num_classes) class probabilities."""
        x = self.maxpool1(self.conv1(x))  # stem: conv + max pool

        x = self.resblock1(x)  # eight residual blocks
        x = self.resblock2(x)
        x = self.resblock3(x)
        x = self.resblock4(x)
        x = self.resblock5(x)
        x = self.resblock6(x)
        x = self.resblock7(x)
        x = self.resblock8(x)

        x = self.avgpool(x)  # global average pool
        x = self.flatten(x)
        # Softmax is monotonic, so downstream argmax is unaffected; kept to
        # preserve the model's existing output contract (probabilities).
        x = F.softmax(self.fc(x), dim=1)

        return x


# Load the trained network. The checkpoint was written with torch.save(model)
# (a full pickled module), so unpickling needs the Residual/RESNET classes
# defined above. NOTE(review): unpickling executes arbitrary code -- only load
# checkpoints from a trusted source.
test_model = torch.load("resnet18_99.pth")
test_model = test_model.cuda()
test_model.eval()  # use BatchNorm running statistics instead of batch stats
with torch.no_grad():  # inference only: skip gradient bookkeeping
    # Add the batch dimension explicitly: (3, 32, 32) -> (1, 3, 32, 32).
    # Clearer than reshape with magic numbers, and fails loudly on any
    # unexpected input shape instead of silently rearranging pixels.
    image = image.unsqueeze(0)
    output = test_model(image)
print("预测的结果为：{}".format(output.argmax(1).item()))
