import numpy as np
import scipy.sparse as sp
import torch
import pandas as pd
import matplotlib.pyplot as plt  # 新增：用于画图
import os
from torch.autograd import Variable

def load_data(path=r"data", dataset=None, month=-1, use_all_ones_adj=False):
    """
    Load one month of a stock dataset as graph tensors.

    Parameters:
    path: root data directory
    dataset: dataset name (sub-directory under ``path/stock``)
    month: month index used to build the file name
    use_all_ones_adj: if True, use a fully-connected (all-ones) adjacency
        matrix instead of the edges from the ``.cites`` file

    Returns:
    (idx, adj, features, labels) — node id array, dense normalized adjacency
    tensor, dense feature tensor, and flat float label tensor.
    """
    # Common file-name prefix for the .content / .cites files
    base_path = fr"{path}/stock/{dataset}/stock_{month}"

    # .content rows: <node id> <feature...> <label>
    idx_features_labels = np.genfromtxt(f"{base_path}.content", dtype=np.dtype(str))
    features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)
    labels = idx_features_labels[:, -1].astype(np.float32)

    # Node ids and a map from raw id -> row position
    idx = np.array(idx_features_labels[:, 0], dtype=np.int32)
    idx_map = {j: i for i, j in enumerate(idx)}

    if use_all_ones_adj:
        # Fully-connected adjacency: every node linked to every node
        n = len(idx)
        adj = sp.coo_matrix(np.ones((n, n)), shape=(n, n), dtype=np.float32)
    else:
        # Build the adjacency matrix from the edge list
        edges_unordered = np.genfromtxt(f"{base_path}.cites", dtype=np.int32)
        # Fix: genfromtxt returns a 1-D array when the file holds a single
        # edge, which made e[0] below raise IndexError; force (n_edges, 2).
        edges_unordered = edges_unordered.reshape(-1, 2)
        # Key step: drop edges whose endpoints are absent from the feature file
        valid_nodes = set(idx)
        edges_filtered = [e for e in edges_unordered if e[0] in valid_nodes and e[1] in valid_nodes]
        # Remap raw node ids to row indices via idx_map; the explicit
        # (-1, 2) reshape keeps the empty-edge case well-formed.
        edges = np.array([idx_map[v] for e in edges_filtered for v in e],
                         dtype=np.int32).reshape(-1, 2)

        adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
                            shape=(len(idx), len(idx)),
                            dtype=np.float32)
        # Symmetrize: keep the larger of A[i, j] / A[j, i]
        adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)

    # Normalize (feature normalization intentionally disabled)
    # features = normalize_features(features)
    adj = normalize_adj(adj + sp.eye(adj.shape[0]))

    # Convert to dense PyTorch tensors
    adj = torch.FloatTensor(np.array(adj.todense()))
    features = torch.FloatTensor(np.array(features.todense()))
    labels = torch.FloatTensor(labels).flatten()

    return idx, adj, features, labels

def normalize_adj(mx):
    """Symmetrically normalize a sparse matrix: D^-1/2 * A^T * D^-1/2.

    Rows with zero sum would produce infinities; their scale factor is
    forced to zero instead.
    """
    degrees = np.array(mx.sum(1)).flatten()
    inv_sqrt_deg = np.power(degrees, -0.5)
    inv_sqrt_deg[np.isinf(inv_sqrt_deg)] = 0.0
    d_inv_sqrt = sp.diags(inv_sqrt_deg)
    return mx.dot(d_inv_sqrt).transpose().dot(d_inv_sqrt)

def normalize_features(mx):
    """Scale each row of a sparse matrix so its entries sum to one.

    All-zero rows are left as zeros rather than producing infinities.
    """
    row_totals = np.array(mx.sum(1)).flatten()
    inv_totals = np.power(row_totals, -1.0)
    inv_totals[np.isinf(inv_totals)] = 0.0
    return sp.diags(inv_totals).dot(mx)

def rank(x):
    """Return descending ranks for a 1-D tensor: the largest value gets
    rank 1. The result has x's dtype and device."""
    order = torch.argsort(x, descending=True)
    ranks = torch.arange(1, len(x) + 1, device=x.device).to(x.dtype)
    out = torch.empty_like(x)
    out[order] = ranks
    return out

def spearman_IC(pred, target):
    """Compute the Spearman rank correlation (IC) between two tensors.

    Edge-case handling: NaN pairs are dropped, and 0.0 (on ``pred``'s
    device) is returned when there are fewer than two valid points, when
    either side has ~zero variance, or when the correlation itself is NaN.
    """
    # Keep both tensors on the same device (the math itself runs on CPU)
    if pred.device != target.device:
        target = target.to(pred.device)

    # Work in numpy
    pred_np = pred.detach().cpu().numpy().flatten()
    target_np = target.detach().cpu().numpy().flatten()

    # Drop pairs containing NaN
    valid_mask = ~(np.isnan(pred_np) | np.isnan(target_np))
    pred_clean = pred_np[valid_mask]
    target_clean = target_np[valid_mask]

    # Not enough data or (near-)constant input: correlation is undefined,
    # so return 0 instead of NaN
    if len(pred_clean) < 2 or np.std(pred_clean) < 1e-8 or np.std(target_clean) < 1e-8:
        return torch.tensor(0.0, device=pred.device)

    try:
        from scipy.stats import spearmanr
        corr, _ = spearmanr(pred_clean, target_clean)

        # spearmanr can still return NaN (e.g. ties degenerating the ranks)
        if np.isnan(corr):
            return torch.tensor(0.0, device=pred.device)

        return torch.tensor(corr, device=pred.device)
    except Exception:
        # Fix: narrowed the former bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. Any numeric failure still yields 0.
        return torch.tensor(0.0, device=pred.device)

def pearson_IC(x, y):
    """Pearson correlation of two equally-shaped tensors (NaN if either is constant)."""
    # Input check
    assert x.shape == y.shape, "Tensors must have the same shape"

    # Center both inputs, then combine into covariance / std products
    dx = x - x.mean()
    dy = y - y.mean()

    covariance = (dx * dy).mean()
    sigma_x = torch.sqrt((dx ** 2).mean())
    sigma_y = torch.sqrt((dy ** 2).mean())

    # Degenerate case: a constant input has zero variance
    if sigma_x == 0 or sigma_y == 0:
        return torch.tensor(float('nan'))

    return covariance / (sigma_x * sigma_y)


def make_loss_figure(train_loss_curve, val_loss_curve, round_idx, model, is_res):
    """Plot train/val loss curves for one round and save the PNG under figures/.

    Parameters:
    train_loss_curve, val_loss_curve: per-epoch loss sequences
    round_idx: round number used in the title and file name
    model: model name; 'gat' gets an extra Res_{is_res} sub-directory
    is_res: residual-connection flag, only used in the 'gat' path
    """
    plt.figure(figsize=(8, 5))
    plt.plot(train_loss_curve, label='Train Loss')
    plt.plot(val_loss_curve, label='Val Loss')
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.title(f"Loss Curve - Round {round_idx}")
    plt.legend()
    plt.grid(True)
    plt.tight_layout()
    # Fix: create the full target directory — the old code only created
    # "figures", so savefig failed on the model sub-directory's first use.
    if model == 'gat':
        out_dir = f"figures/{model}/Res_{is_res}"
    else:
        out_dir = f"figures/{model}"
    os.makedirs(out_dir, exist_ok=True)
    plt.savefig(f"{out_dir}/loss_curve_round_{round_idx}.png")
    plt.close()

def make_IC_figure(train_IC_curve, val_IC_curve, round_idx, model, is_res):
    """Plot train/val IC curves for one round and save the PNG under figures/.

    Parameters:
    train_IC_curve, val_IC_curve: per-epoch IC sequences
    round_idx: round number used in the title and file name
    model: model name; 'gat' gets an extra Res_{is_res} sub-directory
    is_res: residual-connection flag, only used in the 'gat' path
    """
    plt.figure(figsize=(8, 5))
    # Fix: legend labels were copy-pasted from the loss figure and read
    # "Train Loss"/"Val Loss" on an IC plot.
    plt.plot(train_IC_curve, label='Train IC')
    plt.plot(val_IC_curve, label='Val IC')
    plt.xlabel("Epoch")
    plt.ylabel("IC")
    plt.title(f"IC Curve - Round {round_idx}")
    plt.legend()
    plt.grid(True)
    plt.tight_layout()
    # Fix: create the full target directory — the old code only created
    # "figures", so savefig failed on the model sub-directory's first use.
    if model == 'gat':
        out_dir = f"figures/{model}/Res_{is_res}"
    else:
        out_dir = f"figures/{model}"
    os.makedirs(out_dir, exist_ok=True)
    plt.savefig(f"{out_dir}/IC_curve_round_{round_idx}.png")
    plt.close()

def create_all_ones_adj(adj):
    """Return an all-ones adjacency matrix matching adj's shape and layout.

    Sparse inputs yield a fully-connected sparse COO tensor; dense inputs
    yield a dense all-ones tensor. The result lives on adj's device and is
    wrapped in a Variable (a no-op on modern PyTorch, kept for callers).
    """
    device = adj.device  # keep the result on the input's device

    if adj.is_sparse:
        # Fix: detect sparsity via .is_sparse and build the tensor with
        # torch.sparse_coo_tensor — isinstance(adj, torch.sparse.FloatTensor)
        # and the FloatTensor constructor are deprecated, and torch.meshgrid
        # without indexing= warns; arange/repeat builds the same index grid.
        n = adj.size(0)
        node_ids = torch.arange(n, device=device)
        rows = node_ids.repeat_interleave(n)
        cols = node_ids.repeat(n)
        indices = torch.stack([rows, cols])
        values = torch.ones(indices.size(1), device=device)
        all_ones_adj = torch.sparse_coo_tensor(indices, values, adj.size())
    else:
        # Dense case: same shape, all ones
        all_ones_adj = torch.ones_like(adj).to(device)

    # Wrap for backward compatibility with Variable-expecting callers
    all_ones_adj = Variable(all_ones_adj.float())

    return all_ones_adj

def huber_loss(y_pred, y_true, delta):
    """Mean Huber loss: quadratic for |error| < delta, linear beyond it."""
    abs_err = torch.abs(y_pred - y_true)
    quadratic = 0.5 * abs_err ** 2
    linear = delta * abs_err - 0.5 * delta ** 2
    per_element = torch.where(abs_err < delta, quadratic, linear)
    return per_element.mean()