import torch
import torch.nn as nn
import torch.nn.functional as F

# LayerNorm applied over the channel dimension (dim 1) of an NCHW tensor.
class LayerNorm(nn.Module):
    """Channel-wise layer normalization for 4D (N, C, H, W) inputs.

    Normalizes each spatial position across its C channel values, then
    applies a learnable per-channel affine transform.
    """

    def __init__(self, dim, eps=1e-6):
        super().__init__()
        # Learnable per-channel scale and shift.
        self.weight = nn.Parameter(torch.ones(dim))
        self.bias = nn.Parameter(torch.zeros(dim))
        self.eps = eps  # numerical-stability floor inside the sqrt

    def forward(self, x):
        # Center and scale across channels at every (n, h, w) location.
        centered = x - x.mean(dim=1, keepdim=True)
        variance = centered.pow(2).mean(dim=1, keepdim=True)
        normalized = centered / torch.sqrt(variance + self.eps)
        # Broadcast the (C,) affine parameters over H and W.
        scale = self.weight[:, None, None]
        shift = self.bias[:, None, None]
        return scale * normalized + shift

# Conv + BN + Activation
class ConvBN(nn.Module):
    """Convolution followed by batch normalization and a GELU activation.

    The convolution has no bias because the following BatchNorm supplies
    an equivalent learnable shift.
    """

    def __init__(self, inp, oup, kernel_size, stride, pad, groups=1):
        super().__init__()
        self.conv = nn.Conv2d(
            inp, oup, kernel_size, stride, pad, groups=groups, bias=False
        )
        self.bn = nn.BatchNorm2d(oup)
        self.act = nn.GELU()

    def forward(self, x):
        out = self.conv(x)
        out = self.bn(out)
        out = self.act(out)
        return out

# GhostConv module: reduces computation by generating part of the output
# with a cheap depthwise convolution (GhostNet-style ghost module).
class GhostConv(nn.Module):
    """Ghost convolution: a primary pointwise conv produces a fraction of
    the output channels; the rest are generated from those features with
    a cheap 3x3 depthwise conv, then both are concatenated.

    Args:
        inp: number of input channels.
        oup: number of output channels (guaranteed by slicing in forward).
        kernel_size: kernel size of the primary convolution.
        ratio: fraction of channels produced by the primary conv
            (primary gets ~oup/ratio, the depthwise conv the remainder).
        stride: stride of the primary convolution.
        relu: apply ReLU after each branch when True.
    """

    def __init__(self, inp, oup, kernel_size=1, ratio=2, stride=1, relu=True):
        super().__init__()
        self.oup = oup
        # Ceil division so init_channels * ratio >= oup even when oup is
        # not divisible by ratio; the surplus channels are sliced off in
        # forward(). (Floor division here would silently produce fewer
        # than `oup` output channels whenever oup % ratio != 0.)
        init_channels = (oup + ratio - 1) // ratio
        new_channels = init_channels * (ratio - 1)
        self.primary_conv = nn.Sequential(
            nn.Conv2d(inp, init_channels, kernel_size, stride, kernel_size // 2, bias=False),
            nn.BatchNorm2d(init_channels),
            nn.ReLU(inplace=True) if relu else nn.Identity(),
        )
        # Depthwise 3x3 conv (groups == channels) generating the "ghost" features.
        self.cheap_operation = nn.Sequential(
            nn.Conv2d(init_channels, new_channels, 3, 1, 1, groups=init_channels, bias=False),
            nn.BatchNorm2d(new_channels),
            nn.ReLU(inplace=True) if relu else nn.Identity(),
        )

    def forward(self, x):
        x1 = self.primary_conv(x)
        x2 = self.cheap_operation(x1)
        out = torch.cat([x1, x2], dim=1)
        # Slice to exactly `oup` channels (no-op when oup % ratio == 0).
        return out[:, :self.oup, :, :]

# HorNet Block (simplified)
class HorNetBlock(nn.Module):
    """Simplified HorNet block: GhostConv -> BatchNorm -> GELU.

    Preserves the channel count and spatial resolution of its input.
    """

    def __init__(self, dim):
        super().__init__()
        self.conv1 = GhostConv(dim, dim)
        self.norm = nn.BatchNorm2d(dim)
        self.act = nn.GELU()

    def forward(self, x):
        out = self.conv1(x)
        out = self.norm(out)
        return self.act(out)

# HorNet-Tiny-GF model definition
class HorNetTinyGF(nn.Module):
    """HorNet-Tiny-GF backbone adapted for small (32x32) CIFAR inputs.

    Four stages of HorNetBlocks with channel widths (64, 128, 320, 512)
    and depths (2, 2, 6, 2); a strided 2x2 conv downsamples between
    stages, followed by global average pooling and a linear classifier.
    """

    def __init__(self, num_classes=100):
        super().__init__()
        dims = [64, 128, 320, 512]
        depths = [2, 2, 6, 2]

        # Stem adapted for CIFAR-100: a single 2x2/2 patchify conv,
        # 32x32 -> 16x16, followed by channel-wise LayerNorm.
        self.stem = nn.Sequential(
            nn.Conv2d(3, dims[0], kernel_size=2, stride=2),
            LayerNorm(dims[0]),
        )

        # One stage per (width, depth) pair, each a stack of HorNetBlocks.
        self.stages = nn.ModuleList(
            nn.Sequential(*(HorNetBlock(width) for _ in range(depth)))
            for width, depth in zip(dims, depths)
        )

        # Downsampling convs between consecutive stages: 16 -> 8 -> 4 -> 2.
        self.downsamples = nn.ModuleList(
            nn.Conv2d(c_in, c_out, kernel_size=2, stride=2)
            for c_in, c_out in zip(dims[:-1], dims[1:])
        )

        # Classification head.
        self.global_pool = nn.AdaptiveAvgPool2d(1)
        self.head = nn.Linear(dims[-1], num_classes)

    def forward(self, x):
        x = self.stem(x)  # 32x32 -> 16x16

        # Run each stage, downsampling after all but the last.
        for idx, stage in enumerate(self.stages):
            x = stage(x)
            if idx < len(self.downsamples):
                x = self.downsamples[idx](x)

        pooled = self.global_pool(x).flatten(1)
        return self.head(pooled)

# Model factory function
def hornet_cifar(num_classes=100):
    """Build a HorNet-Tiny-GF model for CIFAR-style classification."""
    model = HorNetTinyGF(num_classes=num_classes)
    return model