import torch
import torch.nn as nn
import math

class ECALayer(nn.Module):
    """Efficient Channel Attention (ECA) module, mixed-precision friendly.

    Produces one attention weight per channel via global average pooling
    followed by a 1-D convolution across the channel dimension, then
    rescales the input feature map.

    Args:
        channels: number of input feature channels C.
        gamma, b: parameters of the ECA-Net adaptive kernel-size formula
            t = |log2(C) + b| / gamma.
    """
    def __init__(self, channels, gamma=2, b=1):
        super().__init__()
        # Adaptive kernel size. It MUST be odd: with padding=k//2 an odd
        # kernel preserves the sequence length (= channels), while an even
        # one changes it and breaks the elementwise product in forward().
        # (The previous parity logic produced an even kernel whenever t
        # was odd, e.g. channels=32 -> kernel 4 -> shape-mismatch crash.)
        t = int(abs(math.log2(channels) + b) / gamma)
        kernel_size = max(3, t if t % 2 else t + 1)

        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.conv = nn.Conv1d(1, 1, kernel_size,
                              padding=kernel_size // 2, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Keep this small attention branch in float32 even under autocast.
        with torch.autocast(device_type='cuda', enabled=False):
            # (B, C, H, W) -> (B, C, 1, 1) -> (B, 1, C); the .float() cast
            # is a no-op for fp32 inputs but lets half-precision inputs
            # match the fp32 conv weights.
            y = self.avg_pool(x.float()).squeeze(-1).transpose(1, 2)
            # Cross-channel 1-D conv, reshape back to (B, C, 1, 1)
            y = self.conv(y).transpose(1, 2).unsqueeze(-1)
            # Cast back so the elementwise product matches x's dtype
            y = self.sigmoid(y).to(x.dtype)
        return x * y.expand_as(x)

class ResidualBlock(nn.Module):
    """Residual block with a single ECA attention layer.

    Layout: conv -> BN -> LeakyReLU -> ECA -> conv -> BN, then the skip
    connection is added and a final LeakyReLU is applied.

    Args:
        channels: number of input/output channels (shape-preserving).
    """
    def __init__(self, channels=256):
        super().__init__()
        self.conv1 = nn.Conv2d(channels, channels, 3, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(channels)
        self.relu = nn.LeakyReLU(0.1, inplace=True)
        self.eca = ECALayer(channels)  # attention sits after the first activation
        self.conv2 = nn.Conv2d(channels, channels, 3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(channels)

    def forward(self, x):
        # First half: conv -> BN -> activation -> channel attention
        out = self.eca(self.relu(self.bn1(self.conv1(x))))
        # Second half: conv -> BN, then add the skip connection
        out = self.bn2(self.conv2(out))
        out += x
        return self.relu(out)

class GomokuNet(nn.Module):
    """Policy/value network for 15x15 Gomoku (AlphaZero-style two heads).

    Input:  (B, 2, 15, 15) float tensor (two stone planes).
    Output: tuple of (log-softmax policy over 225 moves, tanh value in [-1, 1]).

    Args:
        res_blocks: number of residual blocks in the trunk.
    """
    def __init__(self, res_blocks=10):
        super().__init__()
        # Stem: 2 input planes -> 256 feature maps
        self.conv_init = nn.Sequential(
            nn.Conv2d(2, 256, 3, padding=1, bias=False),
            nn.BatchNorm2d(256),
            nn.LeakyReLU(0.1, inplace=True)
        )

        # Residual trunk. Each block ends WITHOUT an activation: the skip
        # add and final activation are applied in forward().
        # NOTE(review): this duplicates ResidualBlock's layer stack, but
        # forward() applies plain ReLU after the add while ResidualBlock
        # uses LeakyReLU(0.1) — kept as-is to preserve behavior; confirm
        # which activation is intended.
        self.res_blocks = nn.ModuleList([
            nn.Sequential(
                nn.Conv2d(256, 256, 3, padding=1, bias=False),
                nn.BatchNorm2d(256),
                nn.LeakyReLU(0.1, inplace=True),
                ECALayer(256),  # single channel-attention layer
                nn.Conv2d(256, 256, 3, padding=1, bias=False),
                nn.BatchNorm2d(256)
            )
            for _ in range(res_blocks)
        ])

        # Policy head: 1x1 reduce -> flatten -> 225 move logits
        self.policy_head = nn.Sequential(
            nn.Conv2d(256, 64, 1, bias=False),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(0.1, inplace=True),
            nn.Flatten(),
            nn.Linear(64*15*15, 225)
        )

        # Value head: 1x1 reduce -> global average pool -> scalar
        self.value_head = nn.Sequential(
            nn.Conv2d(256, 32, 1, bias=False),
            nn.BatchNorm2d(32),
            nn.LeakyReLU(0.1, inplace=True),
            nn.AdaptiveAvgPool2d(1),
            nn.Flatten(),
            nn.Linear(32, 64),
            nn.LeakyReLU(0.1, inplace=True),
            nn.Linear(64, 1)
        )

        # Weight initialization (Kaiming for convs, Xavier for linears)
        self.apply(self._init_weights)

    def _init_weights(self, module):
        """Initialize weights: Kaiming for convs (gain matched to the
        LeakyReLU(0.1) slope), Xavier for linears, identity-like for BN."""
        if isinstance(module, nn.Conv2d):
            # a=0.1 matches the LeakyReLU negative slope used throughout;
            # previously a was left at its default 0, which computes the
            # gain for a plain ReLU despite nonlinearity='leaky_relu'.
            nn.init.kaiming_normal_(module.weight, a=0.1,
                                    mode='fan_out', nonlinearity='leaky_relu')
        elif isinstance(module, nn.Linear):
            nn.init.xavier_normal_(module.weight)
            if module.bias is not None:
                nn.init.constant_(module.bias, 0)
        elif isinstance(module, nn.BatchNorm2d):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.conv_init(x)

        # Post-add activation residual scheme: x <- relu(block(x) + x).
        # (Equivalent to the previous identity/residual bookkeeping.)
        for block in self.res_blocks:
            x = torch.relu(block(x) + x)

        # Two-headed output
        policy = self.policy_head(x)
        value = self.value_head(x)

        return (
            torch.log_softmax(policy, dim=1),
            torch.tanh(value)
        )