# 导入必要库
import torch
import torch.nn as nn
import torch.nn.functional as F

# 4维张量的各类卷积操作
# def conv_dsc(in_ch, out_ch, stride):
#     conv_dsc = nn.Sequential(
#         nn.Conv2d(in_ch, in_ch, kernel_size=3, stride=stride, padding=1, groups=in_ch),
#         nn.Conv2d(in_ch, out_ch, kernel_size=1)
#     )
#     return conv_dsc
#
# # 创建4维张量（batch=2, channels=3, height=16, width=16）
# x = torch.randn(2, 3, 16, 16)
# print("原始张量形状:", x.shape)
#
# conv_regular = nn.Conv2d(in_channels=3, out_channels=8, kernel_size=3, stride=1, padding=1)
# out_regular = conv_regular(x)
# print("常规卷积输出形状:", out_regular.shape)
#
# conv_dw = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=3, stride=1, padding=1, groups=3)
# out_dw = conv_dw(x)
# print("逐通道卷积（DW）输出形状:", out_dw.shape)
#
# conv_pw = nn.Conv2d(in_channels=3, out_channels=16, kernel_size=1, stride=1)
# out_pw = conv_pw(x)
# print("逐点卷积（PW）输出形状:", out_pw.shape)
#
# conv_dsc_layer = conv_dsc(in_ch=3, out_ch=12, stride=1)
# out_dsc = conv_dsc_layer(x)
# print("深度可分离卷积输出形状:", out_dsc.shape)


# 卷积后批量归一化
# import torch
# import torch.nn as nn
#
# x = torch.randn(4, 4, 12, 12)
# print("原始张量形状:", x.shape)
#
# conv_layer = nn.Conv2d(in_channels=4, out_channels=6, kernel_size=3, stride=1, padding=1)
# bn_layer = nn.BatchNorm2d(num_features=6)
#
# out_conv = conv_layer(x)
# out_bn = bn_layer(out_conv)
#
# print("卷积后输出形状:", out_conv.shape)
# print("批量归一化后输出形状:", out_bn.shape)
#
# print("卷积后各通道均值:", out_conv.detach().mean(dim=[0,2,3]).numpy())
# print("批量归一化后各通道均值:", out_bn.detach().mean(dim=[0,2,3]).numpy())


# 卷积 + 批量归一化 + 激活函数

# x = torch.randn(2, 3, 20, 20)
# print("原始张量形状:", x.shape)
#
# seq = nn.Sequential(
#     nn.Conv2d(in_channels=3, out_channels=10, kernel_size=3, stride=1, padding=1),
#     nn.BatchNorm2d(num_features=10),
#     nn.ReLU(inplace=True)
# )
#
# out_seq = seq(x)
#
# print("最终输出形状:", out_seq.shape)
# print("激活后最小值:", out_seq.min().item())

# 4 维张量的各类池化操作

# x = torch.randn(2, 8, 16, 16)
# print("原始张量形状:", x.shape)
#
# max_pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)  # 最大值池化（2x2窗口，步长2）
# avg_pool = nn.AvgPool2d(kernel_size=2, stride=2, padding=0)  # 均值池化（2x2窗口，步长2）
# global_max_pool = nn.AdaptiveMaxPool2d(output_size=(1,1))  # 全局最大池化（输出1x1）
# global_avg_pool = nn.AdaptiveAvgPool2d(output_size=(1,1))  # 全局均值池化（输出1x1）
#
#
# out_max = max_pool(x)
# out_avg = avg_pool(x)
# out_global_max = global_max_pool(x)
# out_global_avg = global_avg_pool(x)
#
# print("最大值池化输出形状:", out_max.shape)
# print("均值池化输出形状:", out_avg.shape)
# print("全局最大池化输出形状:", out_global_max.shape)
# print("全局均值池化输出形状:", out_global_avg.shape)
#
# print("全局最大池化展平结果:\n", out_global_max.squeeze().numpy())

# 卷积 + 批量归一化 + 激活 + 池化 + 全连接

# x = torch.randn(3, 3, 32, 32)
# print("原始张量形状:", x.shape)
#
# class ConvPoolFC(nn.Module):
#     def __init__(self):
#         super(ConvPoolFC, self).__init__()
#         # 特征提取部分
#         self.features = nn.Sequential(
#             nn.Conv2d(3, 16, 3, 1, 1),  # 卷积：3->16通道，3x3窗口
#             nn.BatchNorm2d(16),  # 批量归一化
#             nn.ReLU(inplace=True),  # ReLU激活
#             nn.MaxPool2d(2, 2)  # 最大值池化：32->16尺寸
#         )
#         self.fc = nn.Linear(16 * 16 * 16, 10)
#
#     def forward(self, x):
#         x = self.features(x)  # 特征提取：输出形状(3,16,16,16)
#         x = x.view(x.size(0), -1)  # 展平：(3, 16*16*16) = (3, 4096)
#         x = self.fc(x)  # 全连接：4096->10
#         return x
#
# model = ConvPoolFC()
# out = model(x)
#
# print("全连接输出形状:", out.shape)
# print("全连接输出结果:\n", out.detach().numpy())

# 基于残差结构（BasicBlock）的 CNN 搭建

# class BasicBlock(nn.Module):
#     def __init__(self, in_channels, out_channels, stride=1):
#         super(BasicBlock, self).__init__()
#         self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False)
#         self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False)
#         # ReLU激活
#         self.relu = nn.ReLU(inplace=True)
#
#     def forward(self, x):
#         residual = x
#         out = self.conv1(x)
#         out = self.relu(out)
#         out = self.conv2(out)
#         out += residual
#         out = self.relu(out)
#         return out
#
# class ResNetSimple(nn.Module):
#     def __init__(self, num_classes=1000):
#         super(ResNetSimple, self).__init__()
#         # 第一层：Conv2d(3->64, 3x3, stride=2, padding=1) -> 224->112
#         self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1, bias=False)
#         # 最大值池化(2x2, stride=2) -> 112->56
#         self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
#         # 第二层：Conv2d(64->128, 3x3, stride=2, padding=1) ->56->28
#         self.conv2 = nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1, bias=False)
#         # BasicBlock残差模块（128->128, stride=1）-> 28保持不变
#         self.basicblock1 = BasicBlock(128, 128)
#         # 最大值池化(2x2, stride=2) ->28->14
#         self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
#         # 第三层：Conv2d(128->196, 3x3, stride=2, padding=1) ->14->7
#         self.conv3 = nn.Conv2d(128, 196, kernel_size=3, stride=2, padding=1, bias=False)
#         # BasicBlock残差模块（196->196, stride=1）->7保持不变
#         self.basicblock2 = BasicBlock(196, 196)
#         # 全局均值池化 ->7x7->1x1
#         self.global_avg_pool = nn.AdaptiveAvgPool2d((1, 1))
#         # 全连接层（196->num_classes）
#         self.fc = nn.Linear(196, num_classes)
#
#     def forward(self, x):
#         # 前向传播流程
#         x = self.conv1(x)  # 224->112, 3->64
#         x = self.maxpool1(x)  # 112->56
#         x = self.conv2(x)  # 56->28, 64->128
#         x = self.basicblock1(x)  # 28保持不变
#         x = self.maxpool2(x)  # 28->14
#         x = self.conv3(x)  # 14->7, 128->196
#         x = self.basicblock2(x)  # 7保持不变
#         x = self.global_avg_pool(x)  # 7->1, 196保持
#         x = x.view(x.size(0), -1)  # 展平：(batch, 196)
#         x = self.fc(x)  # 196->num_classes
#         return x
#
#
# model = ResNetSimple(num_classes=1000)
# # 创建模拟输入：(batch=2, 3通道, 224x224)
# x = torch.randn(2, 3, 224, 224)
#
# out = model(x)
# print("模型输出形状:", out.shape)
# print("模型结构:\n", model)


import torch
import torch.nn as nn
import torch.nn.functional as F

class BasicBlock(nn.Module):
    """ResNet-style basic residual block: two 3x3 conv+BN layers plus a skip connection.

    Fix over the original: ``self.shortcut`` was constructed but never used in
    ``forward``, and the residual addition ``out += x`` raised a shape error
    whenever ``stride != 1`` or ``in_channels != out_channels`` — even though
    the constructor accepts both. The shortcut now applies a 1x1 conv + BN
    projection in those cases (the standard ResNet downsample path), and the
    identity behavior for matching shapes is unchanged.
    """

    def __init__(self, in_channels, out_channels, stride=1):
        super(BasicBlock, self).__init__()
        # Main path: two 3x3 convolutions with BatchNorm (ReLU applied in forward)
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)

        # Shortcut path: identity when input/output shapes already match,
        # otherwise a 1x1 projection so the elementwise add below is valid.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_channels != out_channels:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels),
            )

    def forward(self, x):
        # Main path forward pass
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Residual connection: main path + (possibly projected) shortcut
        out = out + self.shortcut(x)
        return F.relu(out)


# Build the complete convolutional neural network
class ResNetSimple(nn.Module):
    """A small ResNet-style CNN for (batch, 3, 224, 224) inputs.

    Pipeline: conv -> BN -> ReLU -> pool -> conv -> BN -> ReLU ->
    residual block -> pool -> conv -> BN -> ReLU -> residual block ->
    flatten -> fully-connected classifier producing ``num_classes`` logits.
    """

    def __init__(self, num_classes=10):
        super(ResNetSimple, self).__init__()
        # Stage 1: stem convolution, 3 -> 64 channels, 224 -> 112 spatial
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        # Stage 2: max pooling, 112 -> 56
        self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        # Stage 3: convolution, 64 -> 128 channels, 56 -> 28
        self.conv2 = nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(128)
        # Stage 4: residual block, shape preserved at (128, 28, 28)
        self.basic_block1 = BasicBlock(128, 128)
        # Stage 5: max pooling, 28 -> 14
        self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        # Stage 6: convolution, 128 -> 196 channels, 14 -> 7
        self.conv3 = nn.Conv2d(128, 196, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(196)
        # Stage 7: residual block, shape preserved at (196, 7, 7)
        self.basic_block2 = BasicBlock(196, 196)
        # Classifier head: flattened 196*7*7 feature vector -> num_classes logits
        self.fc = nn.Linear(196 * 7 * 7, num_classes)

    def forward(self, x):
        """Map a (batch, 3, 224, 224) tensor to (batch, num_classes) logits."""
        h = F.relu(self.bn1(self.conv1(x)))   # -> (B, 64, 112, 112)
        h = self.maxpool1(h)                  # -> (B, 64, 56, 56)
        h = F.relu(self.bn2(self.conv2(h)))   # -> (B, 128, 28, 28)
        h = self.basic_block1(h)              # shape unchanged
        h = self.maxpool2(h)                  # -> (B, 128, 14, 14)
        h = F.relu(self.bn3(self.conv3(h)))   # -> (B, 196, 7, 7)
        h = self.basic_block2(h)              # shape unchanged
        h = torch.flatten(h, 1)               # -> (B, 196*7*7) = (B, 9604)
        return self.fc(h)                     # -> (B, num_classes)


#  Test / demo code
if __name__ == "__main__":
    # Fix the RNG seed so the demo output is reproducible.
    torch.manual_seed(42)
    # 4-D input tensor: (batch_size=2, channels=3, height=224, width=224), i.e. RGB images.
    x = torch.randn(2, 3, 224, 224)
    print("=" * 50)
    print("输入张量形状:", x.shape)
    print("=" * 50)

    # Build the model and switch to eval mode so BatchNorm uses its running
    # statistics (the conventional inference setup) rather than per-batch stats.
    model = ResNetSimple(num_classes=10)
    model.eval()
    # Print the layer-by-layer structure.
    print("模型结构:")
    print(model)
    print("=" * 50)

    # Inference only: disable autograd to save memory and time.
    with torch.no_grad():
        output = model(x)

    # Print the results. NOTE: the network ends in a plain Linear layer with no
    # softmax, so the values are raw logits — the original label incorrectly
    # called them a "probability distribution". The .detach() call was also
    # redundant under torch.no_grad() and has been removed.
    print("全连接层输出形状:", output.shape)
    print("=" * 50)
    print("全连接层输出结果（batch_size=2，10类logits，未经softmax归一化）:\n", output.numpy())
    print("=" * 50)