import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
import time
from torch.utils.data import Dataset
from GCN_model import GCN 
from get_graph_info import get_data # 获取图数据的函数
import matplotlib.pyplot as plt
class GraphDataset(Dataset):
    """Node-level dataset over a single graph for regression.

    The full graph (features + adjacency) is stored once; each item yields
    only a node index and its regression target, so one shared forward pass
    over the whole graph can be indexed per batch.
    """

    def __init__(self, node_features, adj_matrix, targets, indices=None):
        """
        Args:
            node_features: node feature matrix for the whole graph.
            adj_matrix: adjacency matrix for the whole graph.
            targets: regression target per node.
            indices: optional subset of node indices (e.g. a train/val/test
                split); when given, targets are re-aligned to that subset.
        """
        self.node_features = node_features
        self.adj_matrix = adj_matrix
        if indices is None:
            # Use every node in the graph.
            self.indices = torch.arange(node_features.shape[0])
            self.targets = targets
        else:
            # Restrict to the given split; targets[i] then matches indices[i].
            self.indices = indices
            self.targets = targets[indices]

    def __len__(self):
        return len(self.indices)

    def __getitem__(self, idx):
        # A sample is just (global node index, target value).
        return dict(node_idx=self.indices[idx], target=self.targets[idx])

def load_data(graph_info):
    """Unpack a graph and split its node indices into train/val/test sets.

    Args:
        graph_info: ``(node_features, adj_matrix, targets)`` triple. The
            first two may be torch tensors or numpy arrays; ``targets`` is
            any sequence of regression values.

    Returns:
        ``(node_features, adj_matrix, targets, idx_train, idx_val, idx_test)``
        where targets are float32, scaled by their maximum, and the index
        arrays split the nodes 60% / 20% / 20%.
    """
    node_features, adj_matrix, targets = graph_info
    num_nodes = node_features.shape[0]

    # as_tensor accepts both numpy arrays and torch tensors; the previous
    # tensor -> .numpy() -> from_numpy round trip crashed on numpy input.
    node_features = torch.as_tensor(node_features)
    adj_matrix = torch.as_tensor(adj_matrix)
    targets = torch.as_tensor(targets, dtype=torch.float32)
    # Normalize targets by their maximum.
    # NOTE(review): assumes targets.max() > 0 — a zero/negative max would
    # produce NaN/flipped values; confirm against the data source.
    targets = targets / targets.max()

    # 60% train, 20% val, 20% test; fixed seed for reproducibility.
    idx = np.arange(num_nodes)
    idx_train, idx_temp = train_test_split(idx, test_size=0.4, random_state=42)
    idx_val, idx_test = train_test_split(idx_temp, test_size=0.5, random_state=42)

    return node_features, adj_matrix, targets, idx_train, idx_val, idx_test

def train_model(model, train_loader, val_loader, node_features, adj_matrix, 
                epochs=100, lr=0.0001, weight_decay=5e-4, patience=10):
    """Train ``model`` with Adam and early stopping on the validation loss.

    Args:
        model: GCN-style module called as ``model(node_features, adj_matrix)``
            returning one prediction per node.
        train_loader / val_loader: DataLoaders yielding dicts with keys
            ``'node_idx'`` and ``'target'``.
        node_features: full node-feature matrix (shared by every batch).
        adj_matrix: full adjacency matrix.
        epochs: maximum number of epochs.
        lr: Adam learning rate.
        weight_decay: L2 regularization factor passed to Adam.
        patience: epochs without validation improvement before stopping.

    Returns:
        The model with its best-validation weights restored. Side effect:
        saves the per-iteration loss curves to ``loss_plot.png``.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = model.to(device)
    node_features = node_features.to(device)
    adj_matrix = adj_matrix.to(device)

    # Optimizer and loss (MSE, since this is a regression task).
    optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
    criterion = nn.MSELoss()

    best_val_loss = float('inf')
    best_model = None
    early_stop_counter = 0
    train_loss_history = []
    val_loss_history = []

    print("Starting training...")
    for epoch in range(epochs):
        t = time.time()
        # --- training ---
        model.train()
        train_loss_avg = 0.0
        for batch in train_loader:
            node_indices = batch['node_idx'].to(device)
            targets = batch['target'].to(device)

            optimizer.zero_grad()
            # Full-graph forward pass; only this batch's nodes enter the loss.
            output = model(node_features, adj_matrix)
            loss = criterion(output[node_indices], targets)
            loss.backward()
            optimizer.step()

            train_loss_avg += loss.item()
            train_loss_history.append(loss.item())
        train_loss_avg /= len(train_loader)

        # --- validation ---
        model.eval()
        val_loss_avg = 0.0
        with torch.no_grad():
            for batch in val_loader:
                node_indices = batch['node_idx'].to(device)
                targets = batch['target'].to(device)

                output = model(node_features, adj_matrix)
                loss = criterion(output[node_indices], targets)
                val_loss_avg += loss.item()
                val_loss_history.append(loss.item())
            val_loss_avg /= len(val_loader)

        print(f'Epoch: {epoch+1:04d}',
              f'loss_train: {train_loss_avg:.4f}',
              f'loss_val: {val_loss_avg:.4f}',
              f'time: {time.time() - t:.4f}s')

        # Early stopping on the *average* validation loss. (Previously this
        # compared the last batch's loss, which is noisy and inconsistent
        # with the value printed above.)
        if val_loss_avg < best_val_loss:
            best_val_loss = val_loss_avg
            # Clone each tensor: state_dict().copy() only copies the dict,
            # so the saved "best" weights would keep mutating as
            # optimizer.step() updates parameters in place.
            best_model = {k: v.detach().clone()
                          for k, v in model.state_dict().items()}
            early_stop_counter = 0
        else:
            early_stop_counter += 1
            if early_stop_counter >= patience:
                print(f"Early stopping after {epoch+1} epochs")
                break

    # Restore the best weights (guard covers epochs == 0).
    if best_model is not None:
        model.load_state_dict(best_model)

    # Plot loss curves. NOTE: the x-axis is per-iteration and train/val have
    # different iteration counts per epoch, so the curves are not aligned.
    plt.figure(figsize=(10, 6))
    plt.plot(train_loss_history, label='Training Loss')
    plt.plot(val_loss_history, label='Validation Loss')
    plt.xlabel('Iterations')
    plt.ylabel('Loss')
    plt.title('Training and Validation Loss')
    plt.legend()
    plt.savefig('loss_plot.png')
    plt.show()

    return model

def evaluate_model(model, test_loader, node_features, adj_matrix):
    """Evaluate the model on the test split and print MSE / RMSE / MAE.

    Args:
        model: module called as ``model(node_features, adj_matrix)`` returning
            one prediction per node.
        test_loader: DataLoader yielding dicts with ``'node_idx'`` and
            ``'target'``.
        node_features / adj_matrix: full graph tensors.

    Returns:
        ``(rmse, mae)`` over the test set.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = model.to(device)
    node_features = node_features.to(device)
    adj_matrix = adj_matrix.to(device)

    model.eval()
    test_predictions = []
    test_targets = []

    with torch.no_grad():
        for batch in test_loader:
            node_indices = batch['node_idx'].to(device)
            targets = batch['target'].to(device)

            output = model(node_features, adj_matrix)
            # ravel() guarantees flat scalars even if the model emits a
            # (batch, 1) column instead of a (batch,) vector.
            test_predictions.extend(output[node_indices].cpu().numpy().ravel())
            test_targets.extend(targets.cpu().numpy().ravel())

    predictions = np.asarray(test_predictions)
    actuals = np.asarray(test_targets)
    # Plain numpy — numerically identical to sklearn's mean_squared_error /
    # mean_absolute_error for 1-D input, and keeps this block self-contained.
    mse = float(np.mean((actuals - predictions) ** 2))
    rmse = np.sqrt(mse)
    mae = float(np.mean(np.abs(actuals - predictions)))

    print(f"Test set results:",
          f"MSE= {mse:.4f}",
          f"RMSE= {rmse:.4f}",
          f"MAE= {mae:.4f}")

    return rmse, mae

# Entry point: train and evaluate the GCN node-regression model.
if __name__ == "__main__":
    # Fix random seeds so the data split and weight init are reproducible.
    torch.manual_seed(42)
    np.random.seed(42)

    # graph_info must be a (node_features, adj_matrix, targets) triple.
    # (Previously get_data() was called twice and the first result discarded.)
    # If your data source instead yields an edge list
    # (node_features, edges, edge_weights, targets), densify it first, e.g.:
    #   adj_matrix = torch.zeros(num_nodes, num_nodes)
    #   adj_matrix[edges[0, :], edges[1, :]] = torch.tensor(edge_weights,
    #                                                       dtype=torch.float32)
    graph_info = get_data()

    # Split nodes 60/20/20 into train / val / test index sets.
    node_features, adj_matrix, targets, idx_train, idx_val, idx_test = load_data(graph_info)

    # Each dataset shares the full graph and only iterates its own indices.
    train_dataset = GraphDataset(node_features, adj_matrix, targets, idx_train)
    val_dataset = GraphDataset(node_features, adj_matrix, targets, idx_val)
    test_dataset = GraphDataset(node_features, adj_matrix, targets, idx_test)

    train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=64, shuffle=False)
    test_loader = DataLoader(test_dataset, batch_size=64, shuffle=False)

    # num_classes=1: scalar regression head rather than classification.
    model = GCN(
        in_features=node_features.shape[1],
        hidden_features=128,
        num_classes=1,
        dropout_rate=0.2
    )

    # Train with early stopping; returns the best-validation weights.
    model = train_model(
        model=model,
        train_loader=train_loader,
        val_loader=val_loader,
        node_features=node_features,
        adj_matrix=adj_matrix,
        epochs=200,
        lr=0.01,
        weight_decay=5e-4,
        patience=20
    )

    # Report MSE / RMSE / MAE on the held-out test nodes.
    evaluate_model(model, test_loader, node_features, adj_matrix)