import os
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Configure matplotlib so CJK (Chinese) axis titles/labels render correctly
# instead of empty boxes; unicode_minus=False keeps the minus sign drawable
# with these fonts.
import matplotlib
matplotlib.rcParams['font.sans-serif'] = ['SimHei', 'DejaVu Sans', 'Arial Unicode MS', 'sans-serif']
matplotlib.rcParams['axes.unicode_minus'] = False
plt.rcParams['font.sans-serif'] = ['SimHei', 'DejaVu Sans', 'Arial Unicode MS', 'sans-serif']
plt.rcParams['axes.unicode_minus'] = False

from sklearn.preprocessing import StandardScaler
from torch.utils.data import Dataset, DataLoader
from torch.autograd import Variable
from sklearn.neighbors import KNeighborsClassifier
import torch.nn.functional as F

# ==========================
# 1. Dataset loading & preprocessing (adapted for BSM1_WWTP_data)
# ==========================
class BSM1Dataset(Dataset):
    """Sliding-window dataset over the BSM1 wastewater-treatment CSV.

    The CSV's last column is an integer class label; all preceding columns
    are float features, z-scored with a StandardScaler fitted at load time.

    NOTE(review): the scaler is fitted on the entire file before any
    train/val split happens downstream, so validation rows influence the
    normalization statistics — confirm this is acceptable.
    """

    def __init__(self, csv_path, seq_len=10):
        frame = pd.read_csv(csv_path)
        # Last column -> labels; everything else -> feature matrix.
        self.labels = frame.iloc[:, -1].values.astype(int)
        self.seq_len = seq_len
        self.scaler = StandardScaler()
        raw_features = frame.iloc[:, :-1].values.astype(np.float32)
        self.data = self.scaler.fit_transform(raw_features)

    def __getitem__(self, index):
        # For index >= seq_len: the seq_len rows strictly BEFORE `index`.
        # Otherwise: the seq_len rows starting AT `index` (this window looks
        # into the future relative to the label — presumably intentional
        # padding for the earliest samples; verify with the author).
        start = index - self.seq_len if index >= self.seq_len else index
        window = self.data[start:start + self.seq_len]
        return torch.from_numpy(window).float(), self.labels[index]

    def __len__(self):
        return len(self.labels)

# ==========================
# 2. Graph neural network components
# ==========================
class GraphConvolution(nn.Module):
    """Dense graph-convolution layer: out = adj @ (x @ W) + b.

    Args:
        in_features:  size of each input node feature vector.
        out_features: size of each output node feature vector.
    """

    def __init__(self, in_features, out_features):
        super(GraphConvolution, self).__init__()
        self.weight = nn.Parameter(torch.FloatTensor(in_features, out_features))
        self.bias = nn.Parameter(torch.FloatTensor(out_features))
        self.reset_parameters()

    def reset_parameters(self):
        """Kaiming-uniform weights, zeroed bias."""
        nn.init.kaiming_uniform_(self.weight)
        nn.init.zeros_(self.bias)

    def forward(self, x, adj):
        """Propagate node features along the graph.

        Args:
            x:   [batch, num_nodes, in_features] node features.
            adj: [num_nodes, num_nodes] adjacency shared across the batch,
                 or a pre-batched [batch, num_nodes, num_nodes] tensor.
        Returns:
            [batch, num_nodes, out_features] propagated features.
        """
        projected = torch.matmul(x, self.weight)
        # Broadcast a shared 2-D adjacency across the batch dimension.
        if adj.dim() == 2:
            adj = adj.unsqueeze(0).expand(x.size(0), -1, -1)
        return torch.bmm(adj, projected) + self.bias

# ==========================
# 3. Adaptive sparse attention (best-performing attention mechanism)
# ==========================
class AdaptiveSparseAttention(nn.Module):
    """Multi-head self-attention restricted by a sparse, cached mask.

    The mask always keeps self- and adjacent-position connections, then adds
    longer-range links (shortest distances first) until roughly
    ``connectivity_ratio`` of all seq_len**2 entries are enabled.

    NOTE(review): extra long-range links are sampled with ``torch.randperm``
    without a fixed generator, so the mask differs between runs (though it
    is cached per (seq_len, device, ratio) within a run) — confirm whether
    reproducibility matters here.
    """
    def __init__(self, embed_dim, num_heads, dropout=0.1, connectivity_ratio=0.5):
        super().__init__()
        self.attn = nn.MultiheadAttention(embed_dim, num_heads, dropout=dropout, batch_first=True)
        self.connectivity_ratio = connectivity_ratio  # target fraction of enabled attention links
        self._mask_cache = {}  # (seq_len, device-str, ratio) -> mask; grows unboundedly
    
    def _get_adaptive_mask(self, seq_len, device):
        """Build (or fetch the cached) sparse connectivity mask for seq_len."""
        cache_key = (seq_len, str(device), self.connectivity_ratio)
        
        if cache_key not in self._mask_cache:
            # Base connectivity: self-loops on every position.
            mask = torch.eye(seq_len, device=device)
            
            # Symmetric links between adjacent positions (distance 1).
            if seq_len > 1:
                indices = torch.arange(seq_len - 1, device=device)
                mask[indices, indices + 1] = 1.0
                mask[indices + 1, indices] = 1.0
            
            # Work out how many extra links are needed to reach the target density.
            total_possible = seq_len * seq_len
            current_connections = mask.sum().item()
            target_connections = int(total_possible * self.connectivity_ratio)
            additional_needed = max(0, target_connections - current_connections)
            
            if additional_needed > 0 and seq_len > 2:
                # Add links distance by distance (closer pairs first),
                # stopping once the target connection count is reached.
                for distance in range(2, seq_len):
                    if mask.sum().item() >= target_connections:
                        break
                    
                    # Candidate source positions for links of this distance.
                    indices = torch.arange(seq_len - distance, device=device)
                    if len(indices) > 0:
                        # Randomly pick a subset of candidates to control density.
                        num_to_add = min(len(indices), additional_needed - int(mask.sum().item() - current_connections))
                        if num_to_add > 0:
                            selected_indices = torch.randperm(len(indices), device=device)[:num_to_add]
                            src_idx = indices[selected_indices]
                            tgt_idx = src_idx + distance
                            mask[src_idx, tgt_idx] = 1.0
                            mask[tgt_idx, src_idx] = 1.0
            
            self._mask_cache[cache_key] = mask
        
        return self._mask_cache[cache_key]
    
    def forward(self, x, adj_matrix=None):
        """Self-attend over x ([batch, seq_len, embed_dim]).

        A True entry in the boolean attn_mask means "disallow attention";
        positions where the (given or generated) mask is 0 are blocked.
        """
        if adj_matrix is None:
            seq_len = x.size(1)
            mask = self._get_adaptive_mask(seq_len, x.device)
            attn_mask = (mask == 0)
        else:
            attn_mask = (adj_matrix == 0)
        
        attn_output, _ = self.attn(x, x, x, attn_mask=attn_mask)
        return attn_output

# ==========================
# 4. Transformer block (uses AdaptiveSparseAttention)
# ==========================
class TransformerBlock(nn.Module):
    """One encoder layer: sparse self-attention then a GELU feed-forward
    network, each wrapped in residual + LayerNorm (post-norm arrangement)."""

    def __init__(self, embed_dim, num_heads, hidden_dim, dropout=0.1, connectivity_ratio=0.5):
        super().__init__()
        # Attention sub-layer with the adaptive sparse connectivity mask.
        self.attn = AdaptiveSparseAttention(embed_dim, num_heads, dropout, connectivity_ratio)

        self.norm1 = nn.LayerNorm(embed_dim)
        self.norm2 = nn.LayerNorm(embed_dim)
        self.ff = nn.Sequential(
            nn.Linear(embed_dim, hidden_dim),
            nn.GELU(),  # GELU tends to work better than ReLU here
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, embed_dim),
        )
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, adj_matrix=None):
        """x: [batch, seq_len, embed_dim]; optional adjacency restricts attention."""
        attended = self.attn(x, adj_matrix)
        x = self.norm1(x + self.dropout(attended))
        ff_out = self.ff(x)
        return self.norm2(x + self.dropout(ff_out))

# ==========================
# 5. RAE-GCN-Transformer model (optimized, uses AdaptiveSparseAttention)
# ==========================
class RAE_GCN_Transformer(nn.Module):
    """Sequence classifier combining a sparse-attention Transformer encoder
    with an optional temporal graph convolution.

    Pipeline: linear embedding -> stacked TransformerBlocks (attention masked
    by a fixed temporal adjacency) -> projection to latent_dim -> attention
    pooling over time -> optional GCN refinement -> MLP classifier head.
    """
    def __init__(self, input_dim, seq_len, embed_dim=32, num_heads=4, num_layers=3, hidden_dim=64, latent_dim=32, dropout=0.1, num_classes=11, connectivity_ratio=0.5):
        super().__init__()
        self.input_dim = input_dim
        self.seq_len = seq_len
        self.latent_dim = latent_dim
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        
        # Per-timestep linear embedding of the raw features.
        self.embedding = nn.Linear(input_dim, embed_dim)
        
        # Stacked Transformer encoder layers (AdaptiveSparseAttention inside).
        self.encoder_layers = nn.ModuleList([
            TransformerBlock(embed_dim, num_heads, hidden_dim, dropout, connectivity_ratio) 
            for _ in range(num_layers)
        ])
        
        # Project encoder output down to the latent feature size.
        self.feature_projection = nn.Linear(embed_dim, latent_dim)
        
        # Enable the GCN branch only for short sequences.
        if seq_len <= 20:  # only use the GCN when the sequence is not too long
            self.use_gcn = True
            # The GCN operates over timesteps as graph nodes; latent_dim in/out.
            self.gcn = GraphConvolution(latent_dim, latent_dim)
            self.gcn_norm = nn.LayerNorm(latent_dim)
        else:
            self.use_gcn = False
        
        # Fixed temporal adjacency, registered as a buffer so it follows
        # the module across devices and into state_dict.
        self.register_buffer('temporal_adj_matrix', self._create_temporal_adjacency_matrix(seq_len))
        
        # Final MLP classification head.
        self.classifier = nn.Sequential(
            nn.Linear(latent_dim, hidden_dim),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, num_classes)
        )
    
    def _create_temporal_adjacency_matrix(self, seq_len):
        """Build the fixed (seq_len x seq_len) temporal adjacency: self-loops,
        symmetric neighbor links, plus a few 0.5-weighted short skip links."""
        adj_matrix = torch.eye(seq_len)
        # Symmetric links between adjacent timesteps only (keeps it sparse).
        for i in range(seq_len - 1):
            adj_matrix[i, i + 1] = 1.0
            adj_matrix[i + 1, i] = 1.0
        
        # A few weighted longer-range links: one anchor every 3 timesteps,
        # linked to positions 2-3 steps ahead.
        for i in range(0, seq_len, 3):
            for j in range(i + 2, min(i + 4, seq_len)):
                adj_matrix[i, j] = 0.5
                adj_matrix[j, i] = 0.5
        
        return adj_matrix
    
    def forward(self, x):
        """Classify a batch of sequences.

        Args:
            x: [batch, seq_len, input_dim] float tensor.
        Returns:
            [batch, num_classes] unnormalized class logits.
        """
        batch_size, seq_len, input_dim = x.size()
        
        # 1. Embed each timestep.
        x_embedded = self.embedding(x)  # [batch, seq_len, embed_dim]
        
        # 2. Transformer encoder; attention is masked by the temporal adjacency.
        encoded = x_embedded
        for encoder_layer in self.encoder_layers:
            encoded = encoder_layer(encoded, self.temporal_adj_matrix)
        
        # 3. Project down to the latent dimension.
        features = self.feature_projection(encoded)  # [batch, seq_len, latent_dim]
        
        # 4. Attention pooling over time: each timestep is weighted by the
        #    softmax of its summed latent activations, instead of a plain mean.
        attention_weights = torch.softmax(features.sum(dim=2), dim=1)  # [batch, seq_len]
        temporal_features = torch.sum(features * attention_weights.unsqueeze(2), dim=1)  # [batch, latent_dim]
        
        # 5. Optional GCN refinement over the timestep graph.
        if self.use_gcn:
            # Each timestep is a graph node; build a chain graph with
            # 0.8-weighted neighbor edges.
            # NOTE(review): this adjacency is rebuilt every forward pass and
            # differs from temporal_adj_matrix — possibly intentional; confirm.
            temporal_adj = torch.eye(seq_len, device=x.device)
            if seq_len > 1:
                for i in range(seq_len - 1):
                    temporal_adj[i, i + 1] = 0.8
                    temporal_adj[i + 1, i] = 0.8
            
            # Apply the GCN; features is [batch, seq_len, latent_dim].
            try:
                gcn_features = torch.relu(self.gcn(features, temporal_adj))  # [batch, seq_len, latent_dim]
                gcn_features = self.gcn_norm(gcn_features)
                
                # Mean-pool over time and blend lightly into the pooled features.
                gcn_pooled = gcn_features.mean(dim=1)  # [batch, latent_dim]
                enhanced_features = temporal_features + 0.2 * gcn_pooled  # lightweight fusion
            except RuntimeError as e:
                # Fall back to the attention-pooled features if the GCN fails.
                print(f"GCN应用失败，跳过: {e}")
                enhanced_features = temporal_features
        else:
            enhanced_features = temporal_features
        
        # 6. Classification head.
        out = self.classifier(enhanced_features)
        
        return out

def knn_accuracy(train_embeddings, train_labels, val_embeddings, val_labels, k=5):
    """Fit a k-NN classifier on the training embeddings and report accuracy.

    Args:
        train_embeddings / train_labels: data the classifier is fitted on.
        val_embeddings / val_labels: held-out data for evaluation.
        k: number of neighbors.
    Returns:
        (train_accuracy, validation_accuracy) as floats.
    """
    classifier = KNeighborsClassifier(n_neighbors=k)
    classifier.fit(train_embeddings, train_labels)
    return (
        classifier.score(train_embeddings, train_labels),
        classifier.score(val_embeddings, val_labels),
    )

def main():
    """Train the RAE-GCN-Transformer on the BSM1 WWTP dataset.

    Reads 'BSM1_WWTP_data/train_data.csv', trains for 100 epochs with an
    80/20 train/validation split, saves the best checkpoint plus the
    validation loss/accuracy history under 'model/', and renders the
    training curves to a PNG.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    batch_size = 512
    seq_len = 10

    # Load the training CSV (features are standardized inside the dataset).
    train_data = BSM1Dataset('BSM1_WWTP_data/train_data.csv', seq_len=seq_len)

    # Hold out 20% of the training data as a validation split.
    train_size = int(len(train_data) * 0.8)
    val_size = len(train_data) - train_size
    train_dataset, val_dataset = torch.utils.data.random_split(train_data, [train_size, val_size])

    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)

    input_dim = train_data.data.shape[1]
    num_classes = len(np.unique(train_data.labels))

    # Build the model (connectivity_ratio=0.5 was the best-performing
    # density for AdaptiveSparseAttention).
    model = RAE_GCN_Transformer(
        input_dim=input_dim,
        seq_len=seq_len,
        num_classes=num_classes,
        connectivity_ratio=0.5
    ).to(device)

    optimizer = optim.Adam(model.parameters(), lr=5e-4, weight_decay=4e-5)
    criterion = nn.CrossEntropyLoss()

    # BUGFIX: create the output directory BEFORE training starts.  It was
    # previously created only after the loop, so the first torch.save() of
    # a best checkpoint crashed when 'model/' did not yet exist.
    os.makedirs('model', exist_ok=True)

    loss_val = []
    acc_val = []
    best_val_acc = 0.0

    print("开始训练 RAE-GCN-Transformer (AdaptiveSparseAttention)")
    print(f"模型参数: embed_dim={model.embed_dim}, num_heads={model.num_heads}, layers={len(model.encoder_layers)}")
    print("连接密度: 0.5")

    for epoch in range(100):
        # --- training pass ---
        model.train()
        train_loss = 0.0
        for x, y in train_loader:
            x = x.to(device)
            y = y.to(device).long()
            out = model(x)
            loss = criterion(out, y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            train_loss += loss.item()

        # --- validation pass (no gradients) ---
        model.eval()
        val_loss = 0.0
        val_correct = 0
        val_total = 0
        with torch.no_grad():
            for x, y in val_loader:
                x = x.to(device)
                y = y.to(device).long()
                out = model(x)
                loss = criterion(out, y)
                val_loss += loss.item()
                _, predicted = torch.max(out.data, 1)
                val_total += y.size(0)
                val_correct += (predicted == y).sum().item()

        val_acc = val_correct / val_total
        loss_val.append(val_loss / len(val_loader))
        acc_val.append(val_acc)

        # Checkpoint whenever validation accuracy improves.
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            torch.save(model.state_dict(), 'model/model_RAE_GCN_Transformer_BSM1.pth')
            print(f"Epoch {epoch+1}: 新的最佳验证准确率 {val_acc:.4f}")

        if (epoch + 1) % 10 == 0:
            print(f'Epoch {epoch+1}, 训练损失: {train_loss/len(train_loader):.4f}, 验证损失: {val_loss/len(val_loader):.4f}, 验证准确率: {val_acc:.4f}')

    print(f"最佳验证准确率: {best_val_acc:.4f}")

    # Persist the validation history for later analysis.
    np.save('model/loss_val_RAE_GCN_Transformer_BSM1.npy', loss_val)
    np.save('model/acc_val_RAE_GCN_Transformer_BSM1.npy', acc_val)

    # Plot validation loss and accuracy curves side by side.
    plt.figure(figsize=(12, 4))
    plt.subplot(1, 2, 1)
    plt.plot(loss_val)
    plt.title('验证损失')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')

    plt.subplot(1, 2, 2)
    plt.plot(acc_val)
    plt.title('验证准确率')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.axhline(y=best_val_acc, color='r', linestyle='--', label=f'最佳: {best_val_acc:.4f}')
    plt.legend()

    plt.tight_layout()
    plt.savefig('model/training_curves_RAE_GCN_Transformer.png', dpi=300, bbox_inches='tight')
    plt.show()

if __name__ == "__main__":
    main()