# main.py
import argparse
import torch
torch.set_num_threads(1)
from torch.utils.data import DataLoader
from src.data.dataset import ADHDDataset
import config.config_dl as config
from src.utils.get import get_df, get_output_dir, get_split_gen
from src.training.training_dl import run_classification, run_regression
from src.utils.save import save_metrics, save_hyperparameters
from src.utils.visualize import (
    plot_10fold_loss, plot_10fold_confusion_matrix, plot_10fold_predicted_vs_true,
    plot_metrics, plot_confusion_matrix, plot_predicted_vs_true
)

# --- Command-line interface ---
# Parsed once at import time; `args` is consumed throughout the script.
_MODEL_CHOICES = ["transformer", "alex", "res", "mobile", "mlp", "lstm", "rnn"]
_TASK_CHOICES = ["classification", "regression"]

arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("--model", type=str, default="mobile", choices=_MODEL_CHOICES)
arg_parser.add_argument("--seed", type=int, default=42)
arg_parser.add_argument("--experiment", type=str, default="classification", choices=_TASK_CHOICES)
arg_parser.add_argument("--early_stop", action="store_true", help="是否使用早停")
arg_parser.add_argument("--cv", action="store_true", help="是否使用十折交叉验证")
arg_parser.add_argument("--sensitive", action="store_true", help="是否使用敏感性分析数据")
args = arg_parser.parse_args()

# === Data and output directory ===
# Load the experiment DataFrame (optionally its sensitivity-analysis variant)
# and resolve where metrics/plots/models for this run will be written.
df = get_df(args.experiment, args.sensitive)
print("df.shape:", df.shape)
output_dir, csv_path = get_output_dir(
    experiment=args.experiment,
    model=args.model,
    cv=args.cv,
    sensitive=args.sensitive,
)

# === Save hyperparameters ===
# Persist the CLI args plus the static DL config for reproducibility.
save_hyperparameters(args, config, output_dir, pipeline="dl")
print(f"超参数已保存到 {output_dir}/hyperparameters.json")

# === Data splitter ===
# Yields (train_idx, val_idx) index pairs; with --cv this is presumably a
# 10-fold split, otherwise a single train/val split — TODO confirm in get_split_gen.
split_gen = get_split_gen(df, args.experiment, sensitive=args.sensitive, seed=args.seed, cv=args.cv)

# === Training and evaluation ===
# Per-fold accumulators, consumed by the visualization section below.
all_train_loss, all_val_loss = [], []  # loss curves, one entry per fold
all_cm = []                            # confusion matrices (classification only)
all_labels, all_preds = [], []         # true/predicted values (regression only)

# The ID columns to drop are loop-invariant (df is never mutated below), so
# compute them once instead of per fold. NOTE: the old inline comment claimed
# age_group/cesd_group were dropped here — the code actually drops the ID
# columns subject_id/impute_id so they never reach the model.
drop_cols = [c for c in ['subject_id', 'impute_id'] if c in df.columns]

for fold, (train_idx, val_idx) in enumerate(split_gen):
    train_df = df.iloc[train_idx].drop(columns=drop_cols)
    val_df   = df.iloc[val_idx].drop(columns=drop_cols)

    print(f"Fold {fold+1} - Train size: {len(train_df)}, Val size: {len(val_df)}")

    # Dataset & DataLoader. Fit the scaler/encoders on the training split only,
    # then reuse them for validation to avoid leaking validation statistics
    # into the preprocessing.
    train_dataset = ADHDDataset(train_df, task=args.experiment, fit=True)
    val_dataset   = ADHDDataset(val_df, task=args.experiment, fit=False,
                                scaler=train_dataset.scaler,
                                cat_encoders=train_dataset.cat_encoders)
    train_loader = DataLoader(train_dataset, batch_size=config.BATCH_SIZE, num_workers=2, shuffle=True)
    val_loader   = DataLoader(val_dataset, batch_size=config.BATCH_SIZE, num_workers=2, shuffle=False)

    # Train one model for this fold; each task variant returns its own extras.
    if args.experiment == "classification":
        metrics, loss, cm, model_dict = run_classification(args, train_loader, val_loader, train_dataset, args.early_stop)
        all_cm.append(cm)
    else:
        metrics, loss, labels, preds, model_dict = run_regression(args, train_loader, val_loader, train_dataset, args.early_stop)
        all_labels.append(labels)
        all_preds.append(preds)

    # Record metrics; append to the CSV as soon as the fold finishes so partial
    # results survive an interrupted run.
    metrics["fold"] = fold + 1
    all_train_loss.append(loss["train_loss"])
    all_val_loss.append(loss["val_loss"])
    save_metrics(metrics, csv_path)

# === Visualization ===
is_classification = args.experiment == "classification"

if args.cv:
    # Cross-validation run: aggregate plots across all folds.
    plot_10fold_loss(all_train_loss, all_val_loss, f"{output_dir}/10fold_loss.png")
    if is_classification:
        plot_10fold_confusion_matrix(all_cm, f"{output_dir}/10fold_confusion_matrix.png")
    else:
        plot_10fold_predicted_vs_true(all_labels, all_preds, f"{output_dir}/10fold_predicted_vs_true.png")
else:
    # Single-split run: each accumulator holds exactly one entry.
    plot_metrics({"train_loss": all_train_loss[0], "val_loss": all_val_loss[0]}, f"{output_dir}/loss.png")
    if is_classification:
        plot_confusion_matrix(all_cm[0], f"{output_dir}/confusion_matrix.png")
    else:
        plot_predicted_vs_true(all_labels[0], all_preds[0], f"{output_dir}/predicted_vs_true.png")

# Save the model state from the last completed fold.
# NOTE(review): under --cv this keeps only the final fold's weights despite the
# "best_model" filename — confirm this is intended.
torch.save(model_dict, f"{output_dir}/best_model_{args.model}.pth")
