import itertools
import os
import random
import subprocess
import time
import wandb
import h5py
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
from sklearn.metrics import confusion_matrix, roc_auc_score
from sklearn.utils.class_weight import compute_sample_weight
from torch.utils.data import (
    DataLoader,
    Dataset,
    SequentialSampler,
    WeightedRandomSampler,
)
from torch.utils.tensorboard import SummaryWriter
from model import AttentionNet

from utils import set_seed, seed_worker,FeatureBagsDataset,get_class_names,define_data_sampling
from utils import EarlyStopping, get_lr, compute_auc ,render_confusion_matrix,compute_auc_each_class

from moco.config import Config 
# Module-level side effect: the training configuration is loaded from a
# hard-coded JSON path as soon as this module is imported, and the resulting
# `args` namespace is used both by main() and inside run_train_eval_loop().
# NOTE(review): a hard-coded relative path makes this script cwd-dependent —
# consider taking the config path as a CLI argument.
args = Config("im4MEC/configjson/train_config.json").get_config()


def evaluate_model(model, loader, n_classes, loss_fn, device):
    """Run one full inference pass over `loader` and collect per-sample results.

    The result arrays are sized by len(loader) and filled with `.item()` calls,
    so this assumes the loader yields exactly one sample per batch
    (batch_size == 1) — TODO confirm against the loader construction.

    Args:
        model: network whose forward returns (logits, Y_prob, Y_hat, _, _).
        loader: iterable of (data, label) pairs supporting len().
        n_classes: number of classes (width of the probability matrix).
        loss_fn: criterion applied to (logits, label).
        device: torch device the inputs are moved to.

    Returns:
        Tuple of (preds, probs, labels, avg_loss): numpy arrays of predicted
        class indices, class probabilities, ground-truth labels, and the mean
        loss over the loader.
    """
    model.eval()
    n_samples = len(loader)
    predictions = np.zeros(n_samples)
    probabilities = np.zeros((n_samples, n_classes))
    targets = np.zeros(n_samples)
    total_loss = 0.0

    # Inference only: disable autograd to save memory and time.
    with torch.no_grad():
        for idx, (bag, target) in enumerate(loader):
            bag = bag.to(device)
            target = target.to(device)
            logits, y_prob, y_hat, _, _ = model(bag)

            total_loss += loss_fn(logits, target).item()
            predictions[idx] = y_hat.item()
            probabilities[idx] = y_prob.cpu().numpy()
            targets[idx] = target.item()

    return predictions, probabilities, targets, total_loss / n_samples


def run_train_eval_loop(
    train_loader,
    val_loader,
    input_feature_size,
    class_names,
    hparams,
    run_id,
    full_training,
    save_checkpoints,
):
    """Train an AttentionNet for up to hparams["max_epochs"] epochs with TensorBoard logging.

    Per-sample arrays (preds/probs/labels) are sized by len(train_loader) and
    filled with `.item()`, so this assumes the loaders yield one sample per
    batch (batch_size == 1) — TODO confirm.

    Args:
        train_loader: DataLoader over (feature bag, label) training pairs.
        val_loader: validation DataLoader; only used when full_training is False.
        input_feature_size: dimensionality of each input feature vector.
        class_names: list of class names; its length fixes the model output size.
        hparams: dict of optimizer/scheduler/architecture settings (see main()).
        run_id: subdirectory under ./runs for TensorBoard logs and checkpoints.
        full_training: when True, skip validation, early stopping and hparam logging.
        save_checkpoints: when True, save model weights after every epoch.
    """
    writer = SummaryWriter(os.path.join("./runs", run_id))
    device = torch.device("cuda")  # hard-coded: this loop requires a CUDA GPU
    loss_fn = torch.nn.CrossEntropyLoss()
    n_classes = len(class_names)
    model = AttentionNet(
        model_size=hparams["model_size"],
        input_feature_size=input_feature_size,
        dropout=True,
        p_dropout_fc=hparams["p_dropout_fc"],
        p_dropout_atn=hparams["p_dropout_atn"],
        n_classes=n_classes,
    )
    model.to(device)
    print(model)
    n_trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(f"Model has {n_trainable_params} parameters")

    optimizer = torch.optim.Adam(
        filter(lambda p: p.requires_grad, model.parameters()),
        lr=hparams["initial_lr"],
        weight_decay=hparams["weight_decay"],
    )

    # Using a multi-step LR decay routine.
    milestones = [int(x) for x in hparams["milestones"].split(",")]
    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=milestones, gamma=hparams["gamma_lr"]
    )

    early_stop_tracker = EarlyStopping(
        patience=hparams["earlystop_patience"],
        min_epochs=hparams["earlystop_min_epochs"],
        verbose=True,
    )

    metric_history = []
    for epoch in range(hparams["max_epochs"]):
        model.train()
        epoch_start_time = time.time()
        train_loss = 0.0
        # Per-sample buffers for epoch-level metrics (AUC, PR curves).
        preds = np.zeros(len(train_loader))
        probs = np.zeros((len(train_loader), n_classes))
        labels = np.zeros(len(train_loader))

        batch_start_time = time.time()
        for batch_idx, (data, label) in enumerate(train_loader):
            # Time spent waiting on the DataLoader, logged separately from compute.
            data_load_duration = time.time() - batch_start_time

            data, label = data.to(device), label.to(device)
            logits, Y_prob, Y_hat, _, _ = model(data)

            preds[batch_idx] = Y_hat.item()
            probs[batch_idx] = Y_prob.cpu().detach().numpy()
            labels[batch_idx] = label.item()

            loss = loss_fn(logits, label)
            train_loss += loss.item()

            # backward pass
            loss.backward()

            # step, then clear gradients for the next batch
            optimizer.step()
            optimizer.zero_grad()

            batch_duration = time.time() - batch_start_time
            batch_start_time = time.time()

            print(
                f"epoch {epoch}, batch {batch_idx}, batch took: {batch_duration:.2f}s, data loading: {data_load_duration:.2f}s, loss: {loss.item():.4f}, label: {label.item()}"
            )
            writer.add_scalar("data_load_duration", data_load_duration, epoch)
            writer.add_scalar("batch_duration", batch_duration, epoch)

        epoch_duration = time.time() - epoch_start_time
        print(f"Finished training on epoch {epoch} in {epoch_duration:.2f}s")

        train_loss /= len(train_loader)
        train_avg_auc = compute_auc(labels, probs)

        writer.add_scalar("epoch_duration", epoch_duration, epoch)
        writer.add_scalar("LR", get_lr(optimizer), epoch)
        writer.add_scalar("Loss/train", train_loss, epoch)
        writer.add_scalar("AUC/train", train_avg_auc, epoch)

        # Per-class AUC is only meaningful for the multi-class case.
        if n_classes > 2:
            train_single_aucs = compute_auc_each_class(labels, probs)
            for class_index in range(n_classes):
                writer.add_scalar(
                    f"AUC/train-{class_names[class_index]}",
                    train_single_aucs[class_index],
                    epoch,
                )

        for class_index in range(n_classes):
            writer.add_pr_curve(
                f"PRcurve/train-{class_names[class_index]}",
                labels == class_index,
                probs[:, class_index],
                epoch,
            )

        if not full_training:
            print("Evaluating model on validation set...")
            preds, probs, labels, val_loss = evaluate_model(model, val_loader, n_classes, loss_fn, device)
            val_avg_auc = compute_auc(labels, probs)

            writer.add_scalar("Loss/validation", val_loss, epoch)
            writer.add_scalar("AUC/validation", val_avg_auc, epoch)
            if args.wandb:
                wandb.log({"LR": get_lr(optimizer), "Loss/train": train_loss, "AUC/train": train_avg_auc, "Loss/validation": val_loss, "AUC/validation": val_avg_auc})

            for class_index in range(n_classes):
                writer.add_pr_curve(
                    f"PRcurve/validation-{class_names[class_index]}",
                    labels == class_index,
                    probs[:, class_index],
                    epoch,
                )

            metric_dict = {
                "epoch": epoch,
                "val_loss": val_loss,
                "val_auc": val_avg_auc,
                "trainable_params": n_trainable_params,
            }

            if n_classes > 2:
                val_single_aucs = compute_auc_each_class(labels, probs)
                for class_index in range(n_classes):
                    writer.add_scalar(
                        f"AUC/validation-{class_names[class_index]}",
                        val_single_aucs[class_index],
                        epoch,
                    )
                for idx, each_auc_class in enumerate(val_single_aucs):
                    metric_dict[f"val_auc_{class_names[idx]}"] = each_auc_class

                # Confusion matrices are rendered on class names, not indices.
                cm = confusion_matrix(
                    [class_names[l] for l in labels.astype(int)],
                    [class_names[p] for p in preds.astype(int)],
                    labels=class_names,
                )
                writer.add_figure(
                    "Confusion matrix",
                    render_confusion_matrix(cm, class_names, normalize=False),
                    epoch,
                )
                writer.add_figure(
                    "Normalized confusion matrix",
                    render_confusion_matrix(cm, class_names, normalize=True),
                    epoch,
                )

            metric_history.append(metric_dict)
            early_stop_tracker(epoch, val_loss)

        if save_checkpoints:
            torch.save(
                model.state_dict(),
                os.path.join(writer.log_dir, f"{epoch}_checkpoint.pt"),
            )

        # Update LR decay.
        scheduler.step()

        # The tracker is only updated in the validation branch above, so this
        # never fires in full-training mode.
        if early_stop_tracker.early_stop:
            print(f"Early stop criterion reached. Broke off training loop after epoch {epoch}.")
            break

    if not full_training:
        # Log the hyperparameters of this experiment and the performance metrics
        # of the best epoch (lowest validation loss).
        best = sorted(metric_history, key=lambda x: x["val_loss"])[0]
        writer.add_hparams(hparams, best)

    writer.close()


def main(args):
    """Drive training runs over a grid of hyperparameter sets.

    Reads the manifest CSV, builds the train/validation feature-bag datasets
    for the requested fold (or uses the whole manifest when
    args.full_training is set), then runs one train/eval loop per
    hyperparameter set.

    Args:
        args: config namespace providing manifest, fold, full_training,
            data_dir, workers, input_feature_size and wandb.

    Raises:
        ValueError: if a fold is given together with --full_training.
        Exception: if the requested fold column is missing from the manifest.
    """
    set_seed()
    df = pd.read_csv(args.manifest)
    class_names = get_class_names(df)
    fold_index = str(args.fold)  # fold columns in the manifest are named "fold-<index>"

    if args.full_training:
        print(
            f"Training on full dataset (training + validation) with hparam set {args.full_training}"
        )
        if args.fold is not None:
            # A fold selection is meaningless when training on every sample.
            raise ValueError("--fold must not be set when --full_training is used")
        training_set = df
        # Placeholder so len(val_split) below is well-defined; the validation
        # loader is never used in full-training mode.
        val_split = [None]
        base_run_id = "full_dataset"
    else:
        print(f"=> Fold {fold_index}")
        base_run_id = f"fold_{fold_index}"
        try:
            training_set = df[df[f"fold-{fold_index}"] == "training"]
            validation_set = df[df[f"fold-{fold_index}"] == "validation"]
        except KeyError as err:
            # Narrowed from a bare `except:` so unrelated errors (including
            # KeyboardInterrupt) are no longer swallowed; chain the cause.
            raise Exception(
                f"Column fold-{fold_index} does not exist in {args.manifest}"
            ) from err

        val_split = FeatureBagsDataset(validation_set, args.data_dir)
    train_split = FeatureBagsDataset(training_set, args.data_dir)

    # Tag each run with the current git revision plus a timestamp so results
    # stay traceable to the code that produced them.
    git_sha = (
        subprocess.check_output(["git", "describe", "--always"]).strip().decode("utf-8")
    )
    train_run_id = f"{git_sha}_{time.strftime('%Y%m%d-%H%M')}"

    print(f"=> Git SHA {train_run_id}")
    print(f"=> Training on {len(train_split)} samples")
    print(f"=> Validating on {len(val_split)} samples")

    base_hparams = dict(
        sampling_method="random",
        max_epochs=500,
        earlystop_patience=20,
        earlystop_min_epochs=20,  # minimum epochs before early stopping may trigger
        # Optimizer settings
        initial_lr=1e-3,
        milestones="2, 5, 15, 30",
        gamma_lr=0.1,
        weight_decay=1e-5,
        # Model architecture parameters. See model class for details.
        model_size="small",
        p_dropout_fc=0.5,
        p_dropout_atn=0.25,
    )

    # Hyperparameter sweep: each entry overrides selected keys of base_hparams.
    hparam_sets = [
        base_hparams,
        {
            **base_hparams,
            "initial_lr": 1e-4,
            "milestones": "5, 15, 30",
        },
        {
            **base_hparams,
            "initial_lr": 1e-5,
            "milestones": "10, 30",
        },
        {
            **base_hparams,
            "weight_decay": 1e-3,
        },
    ]

    hparams_to_use = hparam_sets
    if args.full_training:
        # full_training doubles as an index into hparam_sets, selecting the
        # winning set from the sweep. NOTE(review): index 0 is unreachable
        # because 0 is falsy — confirm this is intended.
        hparams_to_use = [hparam_sets[args.full_training]]

    for i, hps in enumerate(hparams_to_use):
        run_id = f"{base_run_id}_{hps['model_size']}_{hps['sampling_method']}_hp{i}_{train_run_id}"
        print(f"Running train-eval loop {i} for {run_id}")
        print(hps)

        train_loader, val_loader = define_data_sampling(
            train_split,
            val_split,
            method=hps["sampling_method"],
            workers=args.workers,
        )

        run_train_eval_loop(
            train_loader=train_loader,
            val_loader=val_loader,
            input_feature_size=args.input_feature_size,
            class_names=class_names,
            hparams=hps,
            run_id=run_id,
            full_training=args.full_training,
            # Checkpoints are only kept during the sweep, not the final full run.
            save_checkpoints=not args.full_training,
        )

    print("Finished training.")

if __name__ == "__main__":
    main(args)
