import torch
import torch.nn as nn
import torch_geometric.nn as geom_nn

class TemporalConvBlock(nn.Module):
    """Dilated 1-D temporal convolution block.

    Applies Conv1d -> BatchNorm1d -> ReLU -> Dropout(0.1) along the
    sequence dimension. Padding is chosen so that, for odd kernel sizes,
    the sequence length is preserved ("same" padding); even kernel sizes
    will shrink the output by one step.
    """

    def __init__(self, in_channels, out_channels, kernel_size, dilation):
        super().__init__()
        # "Same" padding for odd kernel sizes under the given dilation.
        same_pad = (kernel_size - 1) * dilation // 2
        self.conv = nn.Conv1d(
            in_channels,
            out_channels,
            kernel_size,
            padding=same_pad,
            dilation=dilation,
        )
        self.norm = nn.BatchNorm1d(out_channels)
        self.activation = nn.ReLU()
        self.dropout = nn.Dropout(0.1)

    def forward(self, x):
        """Map [batch, seq_len, in_channels] -> [batch, seq_len, out_channels]."""
        out = x.transpose(1, 2)  # Conv1d expects [batch, channels, seq_len].
        for layer in (self.conv, self.norm, self.activation, self.dropout):
            out = layer(out)
        return out.transpose(1, 2)  # back to [batch, seq_len, channels]

class STGNN(nn.Module):
    """Spatio-temporal graph neural network.

    Pipeline: stacked Chebyshev graph convolutions (spatial) -> dilated
    temporal convolution blocks with residual connections -> multi-head
    graph attention, plus a linear residual path from the raw node features.

    Expected config keys: 'node_dim', 'hidden_dim', 'num_layers',
    'temporal_conv' ({'kernel_size': int, 'dilation': [int, ...]}),
    'num_heads', 'dropout'.
    NOTE(review): 'hidden_dim' should be divisible by 'num_heads' so the
    concatenated GAT heads add back up to 'hidden_dim' — confirm configs.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config

        # Spatial graph convolution layers (Chebyshev polynomials, K=2).
        self.spatial_convs = nn.ModuleList([
            geom_nn.ChebConv(config['node_dim'], config['hidden_dim'], K=2)
            for _ in range(config['num_layers'])
        ])

        # Temporal convolution layers, one block per configured dilation.
        self.temporal_convs = nn.ModuleList([
            TemporalConvBlock(config['hidden_dim'], config['hidden_dim'],
                              kernel_size=config['temporal_conv']['kernel_size'],
                              dilation=d)
            for d in config['temporal_conv']['dilation']
        ])

        # Multi-head graph attention; heads are concatenated, giving
        # (hidden_dim // num_heads) * num_heads output features.
        self.attention = geom_nn.GATConv(
            config['hidden_dim'], config['hidden_dim'] // config['num_heads'],
            heads=config['num_heads'], dropout=config['dropout'])

        # Linear projection for the residual path from the raw inputs.
        self.residual = nn.Linear(config['node_dim'], config['hidden_dim'])

        self.dropout = nn.Dropout(config['dropout'])

    def forward(self, x, edge_index, edge_attr=None):
        """Run the full spatio-temporal pipeline on one graph snapshot.

        Args:
            x: [num_nodes, node_dim] node feature matrix.
            edge_index: [2, num_edges] COO edge indices.
            edge_attr: optional per-edge values forwarded to ChebConv.
                NOTE(review): ChebConv's third positional argument is a
                1-D edge *weight*; a [num_edges, edge_dim] tensor would
                fail there — confirm callers pass scalar weights or None.

        Returns:
            [num_nodes, hidden_dim] node embeddings.
        """
        # Residual path from the raw node features.
        residual = self.residual(x)

        # Spatial graph convolutions, ReLU + dropout between layers.
        spatial_features = x
        for conv in self.spatial_convs:
            spatial_features = conv(spatial_features, edge_index, edge_attr)
            # BUG FIX: original called F.relu, but torch.nn.functional was
            # never imported (NameError). torch.relu is equivalent.
            spatial_features = torch.relu(spatial_features)
            spatial_features = self.dropout(spatial_features)

        # Temporal convolutions. A single snapshot is treated as a
        # length-1 sequence per node: [num_nodes, 1, hidden_dim].
        # TODO: thread a real time dimension through when batched
        # temporal data is available.
        temporal_features = spatial_features.unsqueeze(1)
        for t_conv in self.temporal_convs:
            # Residual connection around each temporal block.
            temporal_features = temporal_features + t_conv(temporal_features)

        # Graph attention over the (squeezed) node embeddings.
        attention_features = self.attention(temporal_features.squeeze(1), edge_index)

        # Combine attention output with the linear residual path.
        return attention_features + residual