# # 代码2-17
# import torch
# from torchsummary import summary
#
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#
# class model1(torch.nn.Module):
#     def __init__(self):
#         super(model1, self).__init__()
#         # 定义第一个线性层，输入维度为3，输出维度为5
#         self.linearl = torch.nn.Linear(3, 5)
#         # 定义第二个线性层，输入维度为5，输出维度为2
#         self.linear2 = torch.nn.Linear(5, 2)
#
#     def forward(self, x):
#         # 前向传播函数，定义数据如何通过各层
#         x = self.linearl(x)
#         x = self.linear2(x)
#         return x
#
# # 实例化model1并移动到指定设备
# model = model1().to(device)
# # 打印模型摘要信息，输入尺寸为(8, 3)
# summary(model, (8, 3))
#
#
# # 代码2-18
# model2 = torch.nn.Sequential(torch.nn.Linear(3, 5), torch.nn.Linear(5, 2))
#
# model = model2.to(device)
# summary(model, (18, 3))

# # 代码2-19
# # 经典CNN网络
# import torch
# import torch.nn as nn
# import torch.nn.functional as F
#
# # 定义一个卷积神经网络类，继承自torch.nn.Module
# class CNN(nn.Module):
#     def __init__(self, num_classes=2):
#         super(CNN, self).__init__()
#         # 第一个卷积层，输入通道数为3（例如RGB图像），输出通道数为16，卷积核大小为3x3，填充为1
#         self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
#         # 最大池化层，池化窗口大小为2x2
#         self.pool = nn.MaxPool2d(2, 2)
#         # 第二个卷积层，输入通道数为16，输出通道数也为16，卷积核大小为3x3，填充为1
#         self.conv2 = nn.Conv2d(16, 16, 3, padding=1)
#         # 第二个最大池化层，池化窗口大小为2x2
#         self.pool = nn.MaxPool2d(2, 2)
#         # 全连接层，输入特征数为16*64*64，输出特征数为num_classes（默认为2）
#         self.output = nn.Linear(16 * 64 * 64, num_classes)
#         # Dropout层，丢弃概率为0.5
#         self.dp1 = nn.Dropout(p=0.5)
#
#     def forward(self, x):
#         # 通过第一个卷积层和ReLU激活函数，然后进行最大池化
#         x = self.pool(F.relu(self.conv1(x)))
#         # 通过第二个卷积层和ReLU激活函数，然后进行最大池化
#         x = self.pool(F.relu(self.conv2(x)))
#         # 将多维张量展平成二维张量
#         temp = x.view(x.size()[0], -1)
#         # 应用Dropout层
#         x = self.dp1(x)
#         # 通过全连接层得到最终的输出
#         output = self.output(temp)
#         return output, x
#
# # 实例化CNN模型
# model = CNN()
# print(model)
#
# # 创建一个随机输入张量，模拟一个批次大小为1的图像数据，图像尺寸为256x256，有3个颜色通道
# input_tensor = torch.randn(1, 3, 256, 256)
#
# # 前向传播，获取输出结果
# output, features = model(input_tensor)
#
# # 打印输出结果的形状
# print("Output shape:", output.shape)
# print("Features shape:", features.shape)

# 代码2-21
# AlexNet网络
import torch
import torch.nn as nn
import torch.nn.functional as F
class AlexNet(nn.Module):
    """Simplified AlexNet-style CNN for 2-class classification.

    Expects 3-channel 256x256 input. Five (3x3 conv -> ReLU -> 2x2 max-pool)
    stages shrink the spatial size 256 -> 254/2 -> ... -> 6, so the pooled
    feature map is (N, 512, 6, 6) and the classifier head sees 512*6*6
    features.
    """

    def __init__(self):
        super(AlexNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, 3)
        self.conv2 = nn.Conv2d(32, 64, 3)
        self.conv3 = nn.Conv2d(64, 128, 3)
        self.conv4 = nn.Conv2d(128, 256, 3)
        self.conv5 = nn.Conv2d(256, 512, 3)
        # One shared, parameter-free pooling module. The original assigned
        # self.pool five identical times; only the last binding survived,
        # so a single module is equivalent and clearer.
        self.pool = nn.MaxPool2d(2, 2)
        # Classifier head: flattened 512*6*6 features -> 2 logits.
        self.output = nn.Linear(in_features=512 * 6 * 6, out_features=2)
        self.dp1 = nn.Dropout(p=0.5)

    def forward(self, x):
        """Forward pass.

        Args:
            x: input batch of shape (N, 3, 256, 256).

        Returns:
            Tuple ``(logits, features)``: logits of shape (N, 2) and the
            pooled convolutional feature map of shape (N, 512, 6, 6).
        """
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.pool(F.relu(self.conv3(x)))
        x = self.pool(F.relu(self.conv4(x)))
        x = self.pool(F.relu(self.conv5(x)))
        # Flatten to (N, 512*6*6) for the linear head.
        flat = x.view(x.shape[0], -1)
        # Bug fix: the original applied dropout to the 4D feature map but
        # fed the *pre-dropout* flattened tensor into the linear layer, so
        # dropout never affected the classifier output. Apply it on the
        # classifier path instead (identity in eval mode, so inference
        # behavior is unchanged).
        flat = self.dp1(flat)
        output = self.output(flat)
        return output, x

