import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional


class DepthwiseTemporal(nn.Module):
    """Depthwise temporal conv that expands each input channel into `d`
    temporal feature maps.

    Input:  (B, C, T)
    Output: (B, C, T', d) where T' depends on stride `s`.
    """

    def __init__(self, in_channels: int, d: int = 4, k: int = 9, s: int = 1):
        super().__init__()
        expanded = in_channels * d
        # groups=in_channels: each input channel is convolved independently and
        # yields `d` consecutive output channels (group-major ordering).
        self.dw = nn.Conv1d(in_channels, expanded, kernel_size=k, stride=s,
                            padding=k // 2, groups=in_channels, bias=False)
        self.bn = nn.BatchNorm1d(expanded)
        self.act = nn.ReLU(inplace=True)
        self.in_channels = in_channels
        self.d = d

    def forward(self, x):
        """x: (B, C, T) -> (B, C, T', d)."""
        feats = self.act(self.bn(self.dw(x)))  # (B, C*d, T')
        batch = feats.size(0)
        tlen = feats.size(-1)
        # Undo the grouped-conv channel packing: C*d -> (C, d).
        feats = feats.view(batch, self.in_channels, self.d, tlen)
        return feats.transpose(2, 3).contiguous()  # (B, C, T', d)


class Projector(nn.Module):
    """Point-wise MLP projection applied independently at every
    (batch, channel, time) position: (B, C, T, in_dim) -> (B, C, T, out_dim)."""

    def __init__(self, in_dim: int, out_dim: int):
        super().__init__()
        self.proj = nn.Sequential(
            nn.Linear(in_dim, out_dim),
            nn.BatchNorm1d(out_dim),
            nn.ReLU(inplace=True),
        )

    def forward(self, z_ct):
        """z_ct: (B, C, T, d) -> (B, C, T, out_dim)."""
        B, C, T, _ = z_ct.shape
        # BatchNorm1d wants (N, features): fold B, C, T into one batch axis.
        flat = z_ct.view(B * C * T, -1)
        projected = self.proj(flat)
        return projected.view(B, C, T, -1)


class ChannelAttention(nn.Module):
    """Softmax attention over the channel axis.

    A small MLP scores every (channel, time) feature vector; scores are
    softmax-normalized across channels and used to pool channels away.
    Returns both the pooled features and the attention weights.
    """

    def __init__(self, in_dim: int, hidden: int = 32):
        super().__init__()
        self.fc1 = nn.Linear(in_dim, hidden)
        self.fc2 = nn.Linear(hidden, 1)

    def forward(self, z):
        """z: (B, C, T, D) -> ((B, T, D) pooled, (B, C, T, 1) weights)."""
        # nn.Linear acts on the last dim, so no explicit flattening is needed.
        hidden = F.relu(self.fc1(z), inplace=True)
        scores = self.fc2(hidden)                # (B, C, T, 1), unnormalized
        weights = torch.softmax(scores, dim=1)   # normalize across channels
        pooled = (z * weights).sum(dim=1)        # (B, T, D)
        return pooled, weights


class TemporalEncoder(nn.Module):
    """Stack of (Conv1d -> BatchNorm1d -> ReLU) layers along the time axis."""

    def __init__(self, dim: int, hidden: int = 128, layers: int = 3):
        super().__init__()
        stack = []
        width = dim
        for _ in range(layers):
            stack.extend([
                nn.Conv1d(width, hidden, 3, padding=1),
                nn.BatchNorm1d(hidden),
                nn.ReLU(inplace=True),
            ])
            width = hidden
        self.net = nn.Sequential(*stack)

    def forward(self, z_time):
        """z_time: (B, T, D) -> (B, T, hidden)."""
        # Conv1d expects (B, D, T): swap the feature/time axes around the stack.
        seq = z_time.transpose(1, 2).contiguous()
        seq = self.net(seq)
        return seq.transpose(1, 2).contiguous()


class TemporalTSCC(nn.Module):
    """TS-TCC-style three-stage 1D convolutional temporal encoder.

    (B, C, T) -> (B, h3, T); 'same' padding keeps T unchanged for odd `k`.
    Dropout follows the first two stages only.
    """

    def __init__(self, in_channels: int, h1: int = 64, h2: int = 128, h3: int = 128, k: int = 7, dropout: float = 0.1):
        super().__init__()
        pad = k // 2
        # (in, out, dropout-after?) for each stage; layer order matches the
        # flat Sequential so state_dict keys are stable.
        stages = [(in_channels, h1, True), (h1, h2, True), (h2, h3, False)]
        layers = []
        for cin, cout, with_dropout in stages:
            layers.extend([
                nn.Conv1d(cin, cout, kernel_size=k, padding=pad, bias=False),
                nn.BatchNorm1d(cout),
                nn.ReLU(inplace=True),
            ])
            if with_dropout:
                layers.append(nn.Dropout(dropout))
        self.block = nn.Sequential(*layers)

    def forward(self, x):
        """x: (B, C, T) -> (B, h3, T)."""
        return self.block(x)


class SpatialEEGNetLite(nn.Module):
    """EEGNet-style spatial filter.

    A 2D conv whose kernel spans all `num_channels` electrodes at a single
    time step: aggregates across the electrode axis while keeping the time
    axis unchanged.
    """

    def __init__(self, num_channels: int, out_channels: int = 32):
        super().__init__()
        self.conv = nn.Conv2d(1, out_channels, kernel_size=(num_channels, 1), bias=False)
        self.bn = nn.BatchNorm2d(out_channels)
        self.act = nn.ReLU(inplace=True)

    def forward(self, x):
        """x: (B, C, T) -> (B, out_channels, T)."""
        spatial = self.conv(x.unsqueeze(1))       # (B, F, 1, T): kernel covers all C
        spatial = self.act(self.bn(spatial))
        return spatial.squeeze(2).contiguous()    # drop the collapsed electrode axis


class TemporalNodeFeaturizer(nn.Module):
    """Depthwise temporal conv that builds a per-electrode feature vector:
    (B, C, T) -> (B, C, per_channel_dim, T)."""

    def __init__(self, in_channels: int, per_channel_dim: int = 8, k: int = 7):
        super().__init__()
        expanded = in_channels * per_channel_dim
        # groups=in_channels: every channel independently produces
        # `per_channel_dim` consecutive feature maps.
        self.conv = nn.Conv1d(in_channels, expanded, kernel_size=k, padding=k//2, groups=in_channels, bias=False)
        self.bn = nn.BatchNorm1d(expanded)
        self.act = nn.ReLU(inplace=True)
        self.in_channels = in_channels
        self.per_channel_dim = per_channel_dim

    def forward(self, x):
        """x: (B, C, T) -> (B, C, d, T)."""
        feats = self.act(self.bn(self.conv(x)))
        batch, _, tlen = feats.shape
        # Unpack the grouped-conv channel layout: C*d -> (C, d).
        return feats.view(batch, self.in_channels, self.per_channel_dim, tlen)


class GraphConvSimple(nn.Module):
    """One dense graph-convolution step: Y = ReLU(BN(A @ X @ W))."""

    def __init__(self, in_dim: int, out_dim: int, bias: bool = False):
        super().__init__()
        self.lin = nn.Linear(in_dim, out_dim, bias=bias)
        self.bn = nn.BatchNorm1d(out_dim)
        self.act = nn.ReLU(inplace=True)

    def forward(self, X, A):
        """X: (B, N, in_dim), A: (B, N, N) -> (B, N, out_dim)."""
        mixed = self.lin(torch.bmm(A, X))   # aggregate neighbours, then project
        batch, nodes, width = mixed.shape
        # BatchNorm1d normalizes each feature over all B*N node instances.
        normed = self.bn(mixed.view(batch * nodes, width))
        return self.act(normed.view(batch, nodes, width))


class SpatialGCNBranch(nn.Module):
    """Spatial branch that treats electrodes as graph nodes.

    For every time step: node features come from a depthwise temporal conv,
    the adjacency is a softmax-normalized cosine similarity with added
    self-loops, a small GCN stack refines the nodes, and a linear layer
    reduces each node to a scalar. Overall: (B, C, T) -> (B, C, T).
    """

    def __init__(self, num_channels: int, per_channel_dim: int = 8, gcn_hidden: int = 32, gcn_layers: int = 2, k_t: int = 7, dropout: float = 0.1):
        super().__init__()
        self.feat = TemporalNodeFeaturizer(num_channels, per_channel_dim=per_channel_dim, k=k_t)
        convs = []
        width = per_channel_dim
        for _ in range(gcn_layers):
            convs.append(GraphConvSimple(width, gcn_hidden))
            width = gcn_hidden
        self.gcn = nn.ModuleList(convs)
        self.dropout = nn.Dropout(dropout)
        self.reduce = nn.Linear(width, 1)
        self.num_channels = num_channels

    def forward(self, x):
        """x: (B, C, T) -> (B, C, T)."""
        node_feats = self.feat(x)                           # (B, C, d, T)
        B, C, d, T = node_feats.shape
        # One graph per (batch, time) pair; the C electrodes are the nodes.
        nodes = node_feats.permute(0, 3, 1, 2).contiguous().view(B * T, C, d)
        unit = F.normalize(nodes, dim=-1)
        sim = torch.bmm(unit, unit.transpose(1, 2))         # cosine similarity
        eye = torch.eye(C, device=nodes.device, dtype=nodes.dtype).unsqueeze(0).expand(B * T, -1, -1)
        adj = torch.softmax(sim + eye, dim=-1)              # self-loop bias; rows sum to 1
        h = nodes
        for conv in self.gcn:
            # Dropout after every GCN layer, including the last one.
            h = self.dropout(conv(h, adj))
        scores = self.reduce(h)                             # (B*T, C, 1)
        return scores.view(B, T, C).permute(0, 2, 1).contiguous()


class Classifier(nn.Module):
    """Two-layer MLP classification head over a flattened feature vector.

    Uses LazyLinear so the input width is inferred at the first forward call.
    """

    def __init__(self, num_classes: int, dropout: float = 0.5, hidden: int = 256):
        super().__init__()
        self.head = nn.Sequential(
            nn.Dropout(dropout),
            nn.LazyLinear(hidden),
            nn.ReLU(inplace=True),
            nn.Dropout(dropout),
            nn.Linear(hidden, num_classes),
        )

    def forward(self, x):
        """x: (B, in_dim) -> (B, num_classes) logits."""
        return self.head(x)


class STSupervised(nn.Module):
    """Supervised spatio-temporal classifier with switchable feature branches.

    Branches (each optional; at least one must be enabled):
      * temporal    : TS-TCC style 1D convs          -> (B, Ht, T)
      * spatial     : EEGNet-style 2D cross-channel  -> (B, Hs, T)
      * spatial_gcn : per-time-step electrode GCN    -> (B, C,  T)

    Enabled branch outputs are concatenated along the feature axis, flattened
    over time, and classified by an MLP head (LazyLinear infers the width).

    NOTE(review): `d`, `k`, `s`, `proj_dim`, `attn_hidden`, `temp_hidden` and
    `temp_layers` are accepted but unused here — kept for signature
    compatibility with existing callers / configs.
    """

    def __init__(self, in_channels: int, num_classes: int, d: int = 4, k: int = 9, s: int = 1, proj_dim: int = 64,
                 attn_hidden: int = 32, temp_hidden: int = 128, temp_layers: int = 3, dropout: float = 0.5,
                 tscc_k: int = 7, tscc_h1: int = 64, tscc_h2: int = 128, tscc_h3: int = 128,
                 spatial_out: int = 32, use_spatial: bool = False, use_temporal: bool = True,
                 use_spatial_gcn: bool = False, spatial_gcn_per_ch: int = 8, spatial_gcn_hidden: int = 32, spatial_gcn_layers: int = 2, spatial_gcn_k_t: int = 7):
        super().__init__()
        # Branch switches.
        self.use_temporal = use_temporal
        self.use_spatial = use_spatial
        self.use_spatial_gcn = use_spatial_gcn
        # Temporal branch: TS-TCC style 1D convolutions.
        self.temp_tscc = TemporalTSCC(in_channels, h1=tscc_h1, h2=tscc_h2, h3=tscc_h3, k=tscc_k) if use_temporal else None
        # Spatial branch: EEGNet-style 2D convolution across electrodes.
        self.spatial = SpatialEEGNetLite(in_channels, out_channels=spatial_out) if use_spatial else None
        # Spatial-GCN branch: graph convolution over electrodes at each time step.
        self.spatial_gcn = SpatialGCNBranch(in_channels, per_channel_dim=spatial_gcn_per_ch, gcn_hidden=spatial_gcn_hidden, gcn_layers=spatial_gcn_layers, k_t=spatial_gcn_k_t, dropout=dropout) if use_spatial_gcn else None
        # Classification head: concat -> (B, H, T), flatten to (B, T*H), then MLP.
        self.cls = Classifier(num_classes, dropout=dropout)

    def forward(self, x):
        """x: (B, C, T) -> (logits (B, num_classes), dict of intermediates).

        Raises:
            RuntimeError: if every feature branch is disabled.
        """
        xt = self.temp_tscc(x) if (self.use_temporal and self.temp_tscc is not None) else None    # (B, Ht, T)
        xs = self.spatial(x) if (self.use_spatial and self.spatial is not None) else None         # (B, Hs, T)
        xsg = self.spatial_gcn(x) if (self.use_spatial_gcn and self.spatial_gcn is not None) else None  # (B, C, T)
        feats = [f for f in (xt, xs, xsg) if f is not None]
        if not feats:
            # Fixed message: there are three branches, not two ("Both ... disabled").
            raise RuntimeError("All feature branches (temporal, spatial, spatial_gcn) are disabled.")
        h_cat = feats[0] if len(feats) == 1 else torch.cat(feats, dim=1)
        B = h_cat.size(0)
        # Put time first so flattening yields contiguous per-step feature groups.
        h_flat = h_cat.permute(0, 2, 1).reshape(B, -1)  # (B, T * total_feature_dim)
        logits = self.cls(h_flat)                        # (B, num_classes)
        return logits, {
            'xt': xt, 'xs': xs, 'xsg': xsg, 'h_cat': h_cat, 'h_flat': h_flat
        }
