# models/blocks.py

import torch
import torch.nn as nn
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.neighbors import KNeighborsRegressor
import xgboost as xgb


class ResidualBlock(nn.Module):
    """Fully connected residual block: Linear -> BN -> ReLU -> Linear -> BN + skip.

    Input and output are both (batch, input_dim), so blocks can be stacked freely.
    """

    def __init__(self, input_dim, hidden_dim):
        super().__init__()
        self.linear1 = nn.Linear(input_dim, hidden_dim)
        self.bn1 = nn.BatchNorm1d(hidden_dim)
        self.relu = nn.ReLU()
        self.linear2 = nn.Linear(hidden_dim, input_dim)
        self.bn2 = nn.BatchNorm1d(input_dim)

    def forward(self, x):
        hidden = self.relu(self.bn1(self.linear1(x)))
        projected = self.bn2(self.linear2(hidden))
        # Skip connection, then a final ReLU on the sum.
        return self.relu(projected + x)


class TemporalConvBlock(nn.Module):
    """Two 1D convolutions over the time axis, then mean-pool time away.

    Expects (batch, time, features) input and returns (batch, out_channels).
    Same-padding keeps the temporal length unchanged through both convolutions.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3):
        super().__init__()
        self.conv1 = nn.Conv1d(in_channels, out_channels, kernel_size, padding=kernel_size // 2)
        self.relu = nn.ReLU()
        self.bn = nn.BatchNorm1d(out_channels)
        self.conv2 = nn.Conv1d(out_channels, out_channels, kernel_size, padding=kernel_size // 2)
        self.bn2 = nn.BatchNorm1d(out_channels)

    def forward(self, x):
        # Conv1d wants channels-first: (B, T, F) -> (B, F, T).
        seq = x.permute(0, 2, 1)
        seq = self.relu(self.bn(self.conv1(seq)))
        seq = self.relu(self.bn2(self.conv2(seq)))
        # Averaging over the time axis directly (dim=2 here) is equivalent to
        # permuting back to (B, T, C) and averaging over dim=1.
        return seq.mean(dim=2)


class FeatureAttentionBlock(nn.Module):
    """Treat each scalar feature as a token and mix them with self-attention.

    A (batch, num_tokens) input is lifted to (batch, num_tokens, d_model) by a
    per-feature linear embedding, passed through a pre-norm GELU Transformer
    encoder, layer-normalized, and mean-pooled over tokens to (batch, d_model).
    """

    def __init__(self, num_tokens: int, d_model: int = 64, n_heads: int = 4, n_layers: int = 2):
        super().__init__()
        self.num_tokens = num_tokens
        # Each scalar feature becomes one d_model-dimensional token.
        self.embed = nn.Linear(1, d_model)
        self.encoder = nn.TransformerEncoder(
            nn.TransformerEncoderLayer(
                d_model=d_model,
                nhead=n_heads,
                dim_feedforward=d_model * 4,
                batch_first=True,
                activation="gelu",
                norm_first=True,
            ),
            num_layers=n_layers,
        )
        self.norm = nn.LayerNorm(d_model)

    def forward(self, x):
        # Validate shape early: the token count is fixed at construction time.
        if x.dim() != 2 or x.size(1) != self.num_tokens:
            raise ValueError(f"输入应为 (batch, {self.num_tokens}), 实际为 {tuple(x.shape)}")
        tokens = self.encoder(self.embed(x.unsqueeze(-1)))
        return self.norm(tokens).mean(dim=1)


class ResidualBlockGELU(nn.Module):
    """Residual MLP block using LayerNorm and GELU instead of BatchNorm/ReLU.

    Maps (batch, input_dim) -> (batch, input_dim); the final GELU is applied
    after the skip connection is added.
    """

    def __init__(self, input_dim, hidden_dim):
        super().__init__()
        self.linear1 = nn.Linear(input_dim, hidden_dim)
        self.norm1 = nn.LayerNorm(hidden_dim)
        self.gelu = nn.GELU()
        self.linear2 = nn.Linear(hidden_dim, input_dim)
        self.norm2 = nn.LayerNorm(input_dim)

    def forward(self, x):
        hidden = self.gelu(self.norm1(self.linear1(x)))
        residual_sum = self.norm2(self.linear2(hidden)) + x
        return self.gelu(residual_sum)


class XGBoostBlock(nn.Module):
    """Expose a fitted XGBoost regressor as an inference-only nn.Module.

    `fit` trains the underlying sklearn-style model directly; `forward` only
    runs prediction and refuses to be called in training mode, since gradient
    boosting cannot participate in backprop.
    """

    def __init__(self, input_dim):
        super().__init__()
        self.input_dim = input_dim
        self.model = xgb.XGBRegressor()

    def forward(self, x):
        if self.training:
            raise RuntimeError("XGBoostBlock 仅用于推理")
        # Round-trip through numpy: tree models know nothing about tensors.
        preds = self.model.predict(x.detach().cpu().numpy())
        return torch.tensor(preds, dtype=torch.float32).unsqueeze(1).to(x.device)

    def fit(self, X, y):
        print("🔧 训练 XGBoostBlock...")
        self.model.fit(X, y)


class RandomForestBlock(nn.Module):
    """Wrap a scikit-learn random forest as an inference-only nn.Module.

    Train with `fit` (plain sklearn call); `forward` is prediction-only and
    raises if the module is in training mode, as forests are not differentiable.
    """

    def __init__(self, input_dim):
        super().__init__()
        self.input_dim = input_dim
        self.model = RandomForestRegressor(n_estimators=100, max_depth=10)

    def forward(self, x):
        if self.training:
            raise RuntimeError("RandomForestBlock 不能训练")
        # Predictions come back as a 1-D array; add the column dim for (B, 1).
        preds = self.model.predict(x.detach().cpu().numpy())
        return torch.tensor(preds, dtype=torch.float32).unsqueeze(1).to(x.device)

    def fit(self, X, y):
        print("🔧 训练 RandomForestBlock...")
        self.model.fit(X, y)


class KNNBlock(nn.Module):
    """Wrap a k-nearest-neighbors regressor (k=5) as an inference-only nn.Module.

    `fit` stores the reference data via sklearn; `forward` predicts only and
    rejects training mode — KNN has no gradients to learn through.
    """

    def __init__(self, input_dim):
        super().__init__()
        self.input_dim = input_dim
        self.model = KNeighborsRegressor(n_neighbors=5)

    def forward(self, x):
        if self.training:
            raise RuntimeError("KNNBlock 不能训练")
        # Convert to numpy for sklearn, then back to a (B, 1) tensor on x's device.
        preds = self.model.predict(x.detach().cpu().numpy())
        return torch.tensor(preds, dtype=torch.float32).unsqueeze(1).to(x.device)

    def fit(self, X, y):
        print("🔧 训练 KNNBlock...")
        self.model.fit(X, y)


class BottleneckBlock(nn.Module):
    """Residual bottleneck: compress features by `expansion`, GELU, expand back, add skip.

    Maps (batch, input_dim) -> (batch, input_dim).

    Fix: the hidden width is clamped to at least 1. Previously
    ``input_dim // expansion`` silently became 0 whenever input_dim < expansion,
    creating zero-width Linear/LayerNorm layers that reduce the block to a
    bias-only transform.
    """

    def __init__(self, input_dim, expansion=4):
        super().__init__()
        # Clamp so that input_dim < expansion still yields a usable hidden layer.
        hidden_dim = max(1, input_dim // expansion)
        self.linear1 = nn.Linear(input_dim, hidden_dim)
        self.norm1 = nn.LayerNorm(hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, input_dim)
        self.norm2 = nn.LayerNorm(input_dim)
        self.gelu = nn.GELU()

    def forward(self, x):
        identity = x
        # Down-project, activate, up-project, then add the skip before the final GELU.
        out = self.gelu(self.norm1(self.linear1(x)))
        out = self.norm2(self.linear2(out))
        return self.gelu(out + identity)


class FeedForwardBlock(nn.Module):
    """Two-layer feed-forward block with LayerNorm and SiLU, no skip connection.

    Maps (batch, input_dim) -> (batch, input_dim); the output is layer-normalized
    and carries no activation after the second projection.
    """

    def __init__(self, input_dim, hidden_dim):
        super().__init__()
        self.linear1 = nn.Linear(input_dim, hidden_dim)
        self.norm1 = nn.LayerNorm(hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, input_dim)
        self.norm2 = nn.LayerNorm(input_dim)
        self.silu = nn.SiLU()

    def forward(self, x):
        hidden = self.norm1(self.linear1(x))
        hidden = self.silu(hidden)
        return self.norm2(self.linear2(hidden))


class SqueezeExcitationBlock(nn.Module):
    """Squeeze-and-excitation style gating for flat (batch, features) inputs.

    Each feature is rescaled by a sigmoid gate computed from a small two-layer
    bottleneck MLP, so the output has the same (batch, input_dim) shape.

    Fixes vs. the original:
    - The ``AdaptiveAvgPool1d(1)`` over a ``(B, C, 1)`` view averaged a single
      element per channel — an identity op — and the trailing ``view`` was also
      a no-op; both have been removed (neither carried parameters, so saved
      state_dicts are unaffected).
    - The squeeze width is clamped to >= 1 so input_dim < reduction no longer
      produces a zero-width Linear.
    """

    def __init__(self, input_dim, reduction=16):
        super().__init__()
        # Clamp so small feature counts still get a 1-wide bottleneck.
        squeezed = max(1, input_dim // reduction)
        self.fc1 = nn.Linear(input_dim, squeezed)
        self.fc2 = nn.Linear(squeezed, input_dim)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # "Squeeze" is trivial for 2D input: each channel is already one scalar
        # per sample, so the gate is computed straight from x.
        gate = self.sigmoid(self.fc2(self.relu(self.fc1(x))))
        return x * gate


class ConvMLPBlock(nn.Module):
    """Mix a flat feature vector with 1D convolutions, then a linear layer + skip.

    The (batch, input_dim) input is treated as a one-channel signal of length
    input_dim; two same-padded convolutions mix neighboring features, a linear
    layer mixes globally, and a LayerNorm + GELU follow the residual add.
    """

    def __init__(self, input_dim, hidden_dim, kernel_size=3):
        super().__init__()
        self.conv1 = nn.Conv1d(1, hidden_dim, kernel_size, padding=kernel_size//2)
        self.conv2 = nn.Conv1d(hidden_dim, 1, kernel_size, padding=kernel_size//2)
        self.linear = nn.Linear(input_dim, input_dim)
        self.norm = nn.LayerNorm(input_dim)
        self.gelu = nn.GELU()

    def forward(self, x):
        residual = x
        # Add a channel dimension so Conv1d sees (B, 1, input_dim).
        signal = self.gelu(self.conv1(x.unsqueeze(1)))
        signal = self.conv2(signal).squeeze(1)
        mixed = self.linear(signal)
        return self.gelu(self.norm(mixed + residual))


class GatedLinearUnit(nn.Module):
    """GLU-style block: a linear path element-wise gated by a sigmoid path.

    Maps (batch, input_dim) -> (batch, input_dim) via a final output projection,
    so the block is drop-in stackable.
    """

    def __init__(self, input_dim, hidden_dim):
        super().__init__()
        self.linear = nn.Linear(input_dim, hidden_dim)
        self.gate = nn.Linear(input_dim, hidden_dim)
        self.sigmoid = nn.Sigmoid()
        self.out = nn.Linear(hidden_dim, input_dim)

    def forward(self, x):
        value = self.linear(x)
        gate = self.sigmoid(self.gate(x))
        return self.out(value * gate)