import os

import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader

# 假设模型和工具函数已正确导入
from model.transform_with_time_dist import CombinedModel
from utils import utils


# ------------------ 1. Dataset class (cleaned) ------------------
class FuturesDataset(Dataset):
    """Futures dataset carrying only what training needs.

    Per sample: X (input features), M (adjacency matrix), and Y
    (labels, already normalized upstream).
    """

    def __init__(self, x_data, y_data, matrix_data):
        # as_tensor avoids a copy when the input is already a float32 tensor.
        self.features = torch.as_tensor(x_data, dtype=torch.float32)
        self.labels = torch.as_tensor(y_data, dtype=torch.float32)  # normalized
        self.adjacency = torch.as_tensor(matrix_data, dtype=torch.float32)
        assert len(self.features) == len(self.labels) == len(self.adjacency)

    def __len__(self):
        return self.features.shape[0]

    def __getitem__(self, idx):
        # Order matters: the training loop unpacks batches as (X, M, y).
        return self.features[idx], self.adjacency[idx], self.labels[idx]


# ------------------ 2. Loss function ------------------
def pairwise_ranking_loss(y_pred, y_true):
    """RankNet-style pairwise ranking loss for batched data.

    Args:
        y_pred: [B, N, 1] predicted returns/scores.
        y_true: [B, N, 1] ground-truth returns (may be continuous).

    Returns:
        Scalar loss averaged over all strictly ordered pairs. When every
        pair is tied (no ranking signal), returns a graph-connected zero
        instead of NaN — the original `.mean()` over an empty selection
        would produce NaN and poison the combined loss.
    """
    # Drop the trailing singleton dim: [B, N, 1] -> [B, N].
    y_pred = y_pred.squeeze(-1)
    y_true = y_true.squeeze(-1)

    # Pairwise differences: entry [b, i, j] = value_i - value_j.
    diff_pred = y_pred.unsqueeze(2) - y_pred.unsqueeze(1)  # [B, N, N]
    diff_true = y_true.unsqueeze(2) - y_true.unsqueeze(1)  # [B, N, N]

    # Pair label: +1 if i should rank above j, -1 if below, 0 for ties.
    s_ij = torch.sign(diff_true)

    # Keep only strictly ordered pairs; ties carry no ranking signal.
    mask = s_ij != 0
    if not mask.any():
        # All labels tied: return 0 while keeping the autograd graph alive
        # so a subsequent backward() still works.
        return y_pred.sum() * 0.0

    s_ij = s_ij[mask]
    diff_pred = diff_pred[mask]

    # RankNet loss: log(1 + exp(-s_ij * diff_pred)).
    # softplus(x) = log(1 + exp(x)) is the numerically stable form.
    return F.softplus(-s_ij * diff_pred).mean()


# ------------------ 3. Plotting helper (simplified) ------------------
def plot_validation_predictions_normalized(model, val_loader, futures, device="cuda"):
    """Plot validation predictions vs ground truth on the normalized scale
    and report the per-asset directional win rate.

    Args:
        model: trained model, called as model(X, M); expected to return
            (predictions, extra) — only predictions are used here.
        val_loader: DataLoader yielding (X, M, y_normalized) batches.
        futures: list of asset names, one per column of the label tensor.
        device: inference device. NOTE(review): defaults to "cuda", which
            fails on CPU-only hosts unless the caller overrides it.
    """
    model.eval()
    all_preds, all_labels = [], []
    with torch.no_grad():
        for X, M, y_normalized in val_loader:
            X, M = X.to(device), M.to(device)
            y_pred_normalized, _ = model(X, M)
            all_preds.append(y_pred_normalized.cpu())
            all_labels.append(y_normalized.cpu())

    # Concatenate batches and drop the trailing singleton dim -> [T, n_assets].
    preds = torch.cat(all_preds).numpy().squeeze(-1)
    labels = torch.cat(all_labels).numpy().squeeze(-1)

    fig, axes = plt.subplots(
        len(futures), 1, figsize=(20, 5 * len(futures)), sharex=True
    )
    # plt.subplots returns a bare Axes (not an array) when there is one row.
    axes = [axes] if len(futures) == 1 else axes
    fig.suptitle(
        "Validation Predictions vs Ground Truth (Normalized Scale)", fontsize=16
    )

    winrates = []

    for i, f in enumerate(futures):
        y_true = labels[:, i]
        y_pred = preds[:, i]

        # === Win-rate computation ===
        mask = (np.abs(y_pred) >= 1e-4) & (np.abs(y_true) >= 1e-4)  # skip near-zero values whose sign is noise
        valid_true = y_true[mask]
        valid_pred = y_pred[mask]

        if len(valid_true) > 0:
            # A "win" is predicting the correct sign (direction) of the move.
            wins = np.sign(valid_true) == np.sign(valid_pred)
            winrate = wins.sum() / len(wins)
        else:
            winrate = np.nan  # no usable points for this asset

        winrates.append(winrate)

        # === Plotting ===
        axes[i].plot(y_true, label="True (Normalized)", color="blue", linewidth=1.5)
        axes[i].plot(
            y_pred, label="Pred (Normalized)", color="red", linewidth=1.0, alpha=0.8
        )
        axes[i].set_title(f"Asset: {f.upper()} | Win rate: {winrate*100:.2f}%")
        axes[i].axhline(y=0, color="gray", linestyle="--", linewidth=1)
        axes[i].legend()
        axes[i].set_ylabel("Normalized Value (Std Dev Units)")

    plt.xlabel("Time Index")
    plt.tight_layout(rect=[0, 0, 1, 0.96])  # leave headroom for the suptitle
    plt.show()

    # === Print overall win rates ===
    avg_winrate = np.nanmean(winrates)  # nan entries (assets with no valid points) are skipped
    print("-" * 50)
    print(f"Average directional win rate across all assets: {avg_winrate*100:.2f}%")
    for i, f in enumerate(futures):
        print(f"  {f.upper()}: {winrates[i]*100:.2f}%")
    print("-" * 50)


# ------------------ 4. Train & validation epoch (simplified) ------------------
def run_epoch(model, loader, optimizer, device, train=True, lambda_rank=0.003):
    """Run one full pass over `loader` and return averaged metrics.

    Args:
        model: network called as model(X, M) -> (prediction, extra).
        loader: DataLoader yielding (X, M, y_normalized) batches.
        optimizer: optimizer used when `train` is True (may be None in eval).
        device: device to move each batch to.
        train: when True, runs backprop and optimizer steps.
        lambda_rank: weight of the pairwise ranking term in the total loss.

    Returns:
        (avg_loss, avg_mse, avg_mae), each a per-sample average over the epoch.
    """
    # train(mode=...) toggles train/eval in one call.
    model.train(mode=train)

    totals = {"loss": 0.0, "mse": 0.0, "mae": 0.0}
    sample_count = 0.0

    for X, M, y_target in loader:
        X = X.to(device)
        M = M.to(device)
        y_target = y_target.to(device)

        # Gradients are tracked only during training.
        with torch.set_grad_enabled(train):
            y_hat, _ = model(X, M)
            mse = F.mse_loss(y_hat, y_target)
            rank = pairwise_ranking_loss(y_hat, y_target)
            loss = mse + lambda_rank * rank

        if train:
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        batch_size = X.size(0)
        totals["loss"] += loss.item() * batch_size

        with torch.no_grad():
            totals["mse"] += mse.item() * batch_size
            totals["mae"] += F.l1_loss(y_hat, y_target).item() * batch_size
        sample_count += batch_size

    return (
        totals["loss"] / sample_count,
        totals["mse"] / sample_count,
        totals["mae"] / sample_count,
    )


# ------------------ 5. Main script (simplified) ------------------
if __name__ == "__main__":
    # --- Configuration ---
    # NOTE(review): "HADDEN_DIM" looks like a typo for HIDDEN_DIM.
    LEARNING_RATE, BATCH_SIZE, EPOCHS, HADDEN_DIM = 1e-4, 256, 50, 768
    device = "cuda" if torch.cuda.is_available() else "cpu"
    # futures, data_folder, window_lags = ["a", "ag", "al"], "./data/tech", 16
    futures, data_folder, window_lags = ["a", "c", "y"], "./data/tech", 16
    # futures, data_folder, window_lags = (
    #     ["aapl", "amgn", "crm", "csco", "ibm", "intc", "msft", "nke", "vz", "wmt"],
    #     "./data/tech",
    #     16,
    # )
    early_stop_patience = 20  # stop after this many epochs without val improvement
    no_improve_epochs = 0

    model_save_path = "./checkpoints/combined_model_normalized.pth"
    print(f"使用设备: {device}")

    # --- Load data ---
    print("正在加载和处理数据...")
    # Assumes the utils helper returns X, normalized y, and M for both splits.
    (X_train, y_train_norm, M_train, X_val, y_val_norm, M_val) = (
        utils.load_or_create_dataset(
            futures_list=futures,
            data_folder=data_folder,
            lags=window_lags,
        )
    )

    # --- Build datasets and loaders ---
    train_dataset = FuturesDataset(X_train, y_train_norm, M_train)
    val_dataset = FuturesDataset(X_val, y_val_norm, M_val)
    train_loader = DataLoader(train_dataset, BATCH_SIZE, shuffle=True)
    val_loader = DataLoader(val_dataset, BATCH_SIZE, shuffle=False)
    print("数据加载成功")

    # --- Model and optimizer ---
    # Assumes X_train is [samples, n_assets, lags, channels] — TODO confirm.
    _, n_assets, L, C = X_train.shape
    model = CombinedModel(C, HADDEN_DIM, 1).to(device)

    # Warm-start from an existing checkpoint when one is present.
    if os.path.exists(model_save_path):
        try:
            print(f"加载已有模型权重: {model_save_path}")
            model.load_state_dict(torch.load(model_save_path, map_location=device))
            print("模型权重加载成功")
        except Exception as e:
            # Best-effort: fall back to fresh weights if the checkpoint is incompatible.
            print(f"加载模型时发生错误: {e}")

    optimizer = optim.AdamW(model.parameters(), lr=LEARNING_RATE)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer,
        mode="min",  # lower val_loss is better
        factor=0.5,  # halve the learning rate on each trigger
        patience=5,  # require 5 stagnant epochs before decaying
        min_lr=1e-6,  # learning-rate floor
    )

    # --- Training loop ---
    best_loss = float("inf")
    for epoch in range(1, EPOCHS + 1):
        train_loss, train_mse, train_mae = run_epoch(
            model, train_loader, optimizer, device, train=True
        )
        val_loss, val_mse, val_mae = run_epoch(
            model, val_loader, None, device, train=False
        )

        # All logged metrics are on the normalized (N) scale.
        print(
            f"Epoch {epoch:02d} | "
            f"Train Loss(N): {train_loss:.4f}, MSE(N): {train_mse:.4f}, MAE(N): {train_mae:.4f} | "
            f"Val Loss(N): {val_loss:.4f}, MSE(N): {val_mse:.4f}, MAE(N): {val_mae:.4f}"
        )

        # Checkpoint on validation improvement; otherwise count toward early stop.
        if val_loss < best_loss:
            best_loss = val_loss
            os.makedirs(os.path.dirname(model_save_path), exist_ok=True)
            torch.save(model.state_dict(), model_save_path)
            print(f"  ↳ 验证集 loss(标准化) 改进，模型已保存至 {model_save_path}")
            no_improve_epochs = 0
        else:
            no_improve_epochs += 1
        current_lr = optimizer.param_groups[0]["lr"]
        print(
            f"Epoch {epoch:02d} | Val Loss: {val_loss:.4f} | Current LR: {current_lr:.6f}"
        )
        # Scheduler steps on val_loss each epoch, before the early-stop check.
        scheduler.step(val_loss)
        if no_improve_epochs >= early_stop_patience:
            print(f"⏹️ Early stopping at epoch {epoch}")
            break

    # --- After training ---
    print("\n训练完成!")
    print(f"最佳验证集 loss (标准化): {best_loss:.4f}")

    print("加载最佳模型进行预测可视化...")
    # Reload the best checkpoint into a fresh model for visualization.
    best_model = CombinedModel(C, HADDEN_DIM, 1).to(device)
    best_model.load_state_dict(torch.load(model_save_path, map_location=device))

    # Visualize validation predictions with the simplified plotting helper.
    plot_validation_predictions_normalized(best_model, val_loader, futures, device)
    print("可视化图已生成。")
