import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from typing import Optional

from rainforeLearn.gomoku.v2.util.model_summary import conv3x3
from rainforeLearn.gomoku.v2.neywork.attention import CBAM, SelfAttention2D


class ResidualBlock(nn.Module):
    """Residual block with an optional attention stage.

    Main path is conv3x3 -> BN -> ReLU -> conv3x3 -> BN. The skip path is the
    identity unless the channel count or stride changes, in which case a
    conv3x3 + BN projection aligns shapes. After the residual addition and
    final ReLU, an optional attention module ('cbam' or 'self_attention')
    refines the feature map.
    """

    def __init__(self, in_channels: int, out_channels: int,
                 stride: int = 1, use_attention: Optional[str] = 'cbam'):
        super().__init__()

        # Main convolutional path.
        self.conv1 = conv3x3(in_channels, out_channels, stride)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = conv3x3(out_channels, out_channels)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)

        # Projection for the skip path (None when the identity already matches).
        self.downsample = self._build_downsample_layer(in_channels, out_channels, stride)

        # Optional attention module applied after the residual sum.
        self.attention = self._build_attention_layer(out_channels, use_attention)

    def _build_downsample_layer(self, in_channels: int, out_channels: int, stride: int) -> Optional[nn.Sequential]:
        """Return a conv+BN projection for the skip path, or None if not needed."""
        shapes_match = in_channels == out_channels and stride == 1
        if shapes_match:
            return None
        return nn.Sequential(
            conv3x3(in_channels, out_channels, stride),
            nn.BatchNorm2d(out_channels),
        )

    def _build_attention_layer(self, channels: int, attention_type: Optional[str]) -> Optional[nn.Module]:
        """Instantiate the requested attention module; None for unknown/absent types."""
        factories = {'cbam': CBAM, 'self_attention': SelfAttention2D}
        factory = factories.get(attention_type)
        return factory(channels) if factory is not None else None

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Skip path: identity, or projected when shapes differ.
        identity = x if self.downsample is None else self.downsample(x)

        # Main path.
        out = self.conv1(x)
        out = self.relu(self.bn1(out))
        out = self.bn2(self.conv2(out))

        # Residual addition followed by activation.
        out = self.relu(out + identity)

        # Optional attention refinement.
        if self.attention is not None:
            out = self.attention(out)

        return out


class NoisyLinear(nn.Module):
    """Linear layer with factorized parametric Gaussian noise (NoisyNet).

    During training the effective weight/bias are ``mu + sigma * epsilon``,
    where ``epsilon`` is factorized noise regenerated via :meth:`reset_noise`;
    in eval mode only the mean parameters ``mu`` are used. This provides
    learned, state-dependent exploration (Fortunato et al., 2018).

    Args:
        in_features: Size of each input sample.
        out_features: Size of each output sample.
        std_init: Initial scale for the sigma parameters.
    """

    def __init__(self, in_features: int, out_features: int, std_init: float = 0.5):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.std_init = std_init

        # Weight parameters: learnable mean/std plus a non-learnable noise buffer.
        # torch.empty replaces the legacy torch.FloatTensor constructor; both
        # produce uninitialized float32 storage that reset_parameters() fills.
        self.weight_mu = nn.Parameter(torch.empty(out_features, in_features))
        self.weight_sigma = nn.Parameter(torch.empty(out_features, in_features))
        self.register_buffer('weight_epsilon', torch.empty(out_features, in_features))

        # Bias parameters, same mu/sigma/epsilon split.
        self.bias_mu = nn.Parameter(torch.empty(out_features))
        self.bias_sigma = nn.Parameter(torch.empty(out_features))
        self.register_buffer('bias_epsilon', torch.empty(out_features))

        self.reset_parameters()
        self.reset_noise()

    def reset_parameters(self) -> None:
        """Initialize mu uniformly in +/-1/sqrt(fan_in) and sigma to a constant scale."""
        mu_range = 1 / math.sqrt(self.in_features)

        self.weight_mu.data.uniform_(-mu_range, mu_range)
        self.weight_sigma.data.fill_(self.std_init / math.sqrt(self.in_features))

        self.bias_mu.data.uniform_(-mu_range, mu_range)
        self.bias_sigma.data.fill_(self.std_init / math.sqrt(self.out_features))

    def reset_noise(self) -> None:
        """Resample factorized noise: weight noise is the outer product of two vectors."""
        epsilon_in = self._scale_noise(self.in_features)
        epsilon_out = self._scale_noise(self.out_features)

        # ger == outer product, giving an (out_features, in_features) noise matrix
        # from only in+out samples (factorized Gaussian noise).
        self.weight_epsilon.copy_(epsilon_out.ger(epsilon_in))
        self.bias_epsilon.copy_(epsilon_out)

    def _scale_noise(self, size: int) -> torch.Tensor:
        """Return f(x) = sign(x) * sqrt(|x|) applied to standard Gaussian samples.

        Generated on the same device/dtype as the parameters so reset_noise()
        works without implicit cross-device copies after .to(device).
        """
        x = torch.randn(size, device=self.weight_mu.device, dtype=self.weight_mu.dtype)
        return x.sign().mul_(x.abs().sqrt_())

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply the (noisy-in-training, deterministic-in-eval) affine transform."""
        if self.training:
            weight = self.weight_mu + self.weight_sigma * self.weight_epsilon
            bias = self.bias_mu + self.bias_sigma * self.bias_epsilon
        else:
            weight = self.weight_mu
            bias = self.bias_mu

        return F.linear(x, weight, bias)
