import time
import numpy as np
from src.utils import data_utils
from src.data.dataset import ADHDDataset
from src.models.classification.dl_mobilenet import MobileNetClassifier
from src.models.classification.dl_alexnet import AlexNetClassifier
from src.models.classification.dl_resnet import ResNetClassifier
from src.models.classification.dl_transformer import TransformerClassifier
from src.models.regression.dl_mlp import MLPRegressor
from src.models.regression.dl_transformer import TransformerRegressor
from src.models.regression.dl_rnn import RNNRegressor
from src.models.regression.dl_lstm import LSTMRegressor
import torch
torch.set_num_threads(1)
import tqdm
import config.config_dl as config
from sklearn.metrics import (
    accuracy_score, classification_report, precision_score, 
    recall_score, f1_score, roc_auc_score, confusion_matrix, balanced_accuracy_score,
    mean_squared_error, mean_absolute_error, r2_score
)
from src.utils.visualize import plot_confusion_matrix, plot_metrics, plot_predicted_vs_true
from thop import profile

def train_loop(model, train_loader, optimizer, criterion, experiment="classification"):
    """Run a single training epoch.

    Returns (avg_loss, accuracy, elapsed_seconds) when *experiment* is
    "classification", otherwise (avg_loss, elapsed_seconds).
    """
    model.train()
    epoch_start = time.time()
    running_loss = 0.0
    pred_buf, label_buf = [], []

    progress = tqdm.tqdm(train_loader, desc="Training", leave=False)
    for batch in progress:
        # Batch layout: [x_cat, x_cont, y]; move every tensor to the target device.
        batch = [t.to(config.DEVICE) for t in batch]

        # Forward pass
        outputs = model(batch)
        loss = criterion(outputs, batch[2])

        # Backward pass + parameter update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Bookkeeping
        running_loss += loss.item()
        if experiment == "classification":
            pred_buf.extend(outputs.argmax(dim=1).cpu().numpy())
            label_buf.extend(batch[2].cpu().numpy())
        progress.set_postfix(loss=loss.item())

    avg_loss = running_loss / len(train_loader)
    elapsed = time.time() - epoch_start
    if experiment != "classification":
        return avg_loss, elapsed
    return avg_loss, accuracy_score(label_buf, pred_buf), elapsed


def validate_loop(model, val_loader, criterion, experiment="classification"):
    """Evaluate the model on the validation set (used during training).

    Classification returns (avg_loss, accuracy, macro_f1, labels, preds,
    infer_time); regression returns (avg_loss, labels, preds, infer_time).
    """
    model.eval()
    t0 = time.time()
    running_loss = 0.0
    preds_out, labels_out = [], []

    progress = tqdm.tqdm(val_loader, desc="Validation", leave=False)
    with torch.no_grad():
        for batch in progress:
            batch = [t.to(config.DEVICE) for t in batch]
            outputs = model(batch)
            batch_loss = criterion(outputs, batch[2])
            running_loss += batch_loss.item()

            if experiment == "classification":
                preds_out.extend(outputs.argmax(dim=1).cpu().numpy())
            else:
                # Regression outputs are already one-dimensional.
                preds_out.extend(outputs.cpu().numpy())
            labels_out.extend(batch[2].cpu().numpy())

            progress.set_postfix(loss=batch_loss.item())

    avg_loss = running_loss / len(val_loader)
    infer_time = time.time() - t0

    if experiment != "classification":
        return avg_loss, labels_out, preds_out, infer_time

    accuracy = accuracy_score(labels_out, preds_out)
    macro_f1 = f1_score(labels_out, preds_out, average="macro")
    return avg_loss, accuracy, macro_f1, labels_out, preds_out, infer_time


def run_classification(args, train_loader, val_loader, train_dataset: ADHDDataset, early_stop=True):
    """Train and validate a classification model.

    Args:
        args: namespace with at least ``model``, a key into the registry below.
        train_loader / val_loader: DataLoaders yielding [x_cat, x_cont, y] batches.
        train_dataset: dataset exposing ``cat_cols``, ``num_cols`` and ``df``.
        early_stop: stop after ``patience`` epochs without a macro-F1 improvement.

    Returns:
        (metrics dict, per-epoch loss dict, confusion matrix of the best epoch,
        model state_dict).
    """
    loss = {"train_loss": [], "val_loss": []}
    # === Model ===
    model_dict = {
        "mobile": MobileNetClassifier,
        "alex": AlexNetClassifier,
        "res": ResNetClassifier,
        "transformer": TransformerClassifier
    }
    common_kwargs = dict(
        num_classes=2,
        num_cat_features=len(train_dataset.cat_cols),
        num_cont_features=len(train_dataset.num_cols),
        embedding_dim=config.EMBEDDING_DIM,
        hidden_dim=config.HIDDEN_DIM,
        dropout=config.DROPOUT
    )
    extra_kwargs = {
        "transformer": dict(
            heads=config.HEADS,
            depth=config.DEPTH,
            categories=tuple(train_dataset.df[col].nunique() for col in train_dataset.cat_cols)
        ),
        "mobile": {},
        "alex": {},
        "res": {}
    }
    model = model_dict[args.model](**common_kwargs, **extra_kwargs[args.model]).to(config.DEVICE)

    # === Optimizer ===
    optimizer = torch.optim.Adam(model.parameters(), lr=config.LEARNING_RATE)

    # === Loss: inverse-frequency class weights (normalized to sum to 1)
    # to counter label imbalance. ===
    label_counts = train_dataset.df[config.LABEL_COL].value_counts().sort_index()
    weights = 1.0 / label_counts.values
    weights = weights / weights.sum()
    weights = torch.tensor(weights, dtype=torch.float32).to(config.DEVICE)
    criterion = torch.nn.CrossEntropyLoss(weight=weights)

    # === Train + validate ===
    num_epochs = config.EPOCHS
    # Start below any attainable F1 so the first epoch always populates the
    # best-epoch snapshot (previously `= 0` could leave the metrics below
    # undefined when every epoch scored F1 == 0, crashing with NameError).
    best_val_f1 = -1.0
    total_train_time = 0
    patience = 2
    counter = 0

    for epoch in range(num_epochs):
        train_loss, train_acc, train_time = train_loop(model, train_loader, optimizer, criterion, experiment="classification")
        val_loss, val_acc, val_f1, labels, preds, infer_time = validate_loop(model, val_loader, criterion, experiment="classification")
        total_train_time += train_time

        loss["train_loss"].append(train_loss)
        loss["val_loss"].append(val_loss)

        print(f"Epoch {epoch+1}/{num_epochs} | "
              f"Train Loss: {train_loss:.4f} Acc: {train_acc:.4f} | "
              f"Val Loss: {val_loss:.4f} Acc: {val_acc:.4f} F1: {val_f1:.4f}")

        # New best epoch: snapshot *all* reported quantities from it so the
        # returned metrics and confusion matrix are mutually consistent.
        # (Previously "acc" and the confusion matrix came from the last epoch
        # while the other metrics came from the best epoch.)
        if val_f1 > best_val_f1:
            best_val_f1 = val_f1
            counter = 0

            best_val_acc = val_acc
            precision = precision_score(labels, preds, average="macro")
            recall = recall_score(labels, preds, average="macro")
            f1 = f1_score(labels, preds, average="macro")
            try:
                # NOTE: computed from hard predictions, not probabilities,
                # so this is a single-threshold (degenerate) AUC.
                roc_auc = roc_auc_score(labels, preds)
            except ValueError:
                # Raised when `labels` contains only one class.
                roc_auc = None
            cpa = balanced_accuracy_score(labels, preds)
            final_infer_time = infer_time
            final_labels = labels
            final_preds = preds

        else:
            if early_stop:
                counter += 1
                print(f"  No improvement in F1. EarlyStopping counter: {counter}/{patience}")
                if counter >= patience:
                    print("Early stopping triggered!")
                    break

    # Model complexity (FLOPs / parameter count) on a single dummy batch.
    dummy_cat, dummy_cont, dummy_y = data_utils.generate_dummy_batch(train_dataset, batch_size=1, device=config.DEVICE)
    flops, params = profile(model, inputs=((dummy_cat, dummy_cont, dummy_y),), verbose=False)

    # === Metrics dict (all values from the best-F1 epoch) ===
    metrics = {
        "acc": float(best_val_acc),
        "f1": float(f1),
        "precision": float(precision),
        "recall": float(recall),
        "roc_auc": float(roc_auc) if roc_auc is not None else None,
        "cpa": float(cpa),
        "params": int(params),
        "flops": int(flops),
        "train_time": float(total_train_time),
        "infer_time": float(final_infer_time)
    }

    # Confusion matrix of the best epoch, plus the model weights.
    return metrics, loss, confusion_matrix(final_labels, final_preds), model.state_dict()


def run_regression(args, train_loader, val_loader, train_dataset: ADHDDataset, early_stop=True):
    """Train and validate a regression model.

    Tracks per-epoch losses, snapshots metrics from the epoch with the lowest
    validation loss, and returns (metrics, loss history, best-epoch labels,
    best-epoch predictions, model state_dict).
    """
    history = {"train_loss": [], "val_loss": []}  # per-epoch loss record

    # === Model selection ===
    registry = {
        "mlp": MLPRegressor,
        "transformer": TransformerRegressor,
        "rnn": RNNRegressor,
        "lstm": LSTMRegressor
    }
    shared_kwargs = dict(
        num_cat_features=len(train_dataset.cat_cols),
        num_cont_features=len(train_dataset.num_cols),
        num_classes=1,
        hidden_dim=config.HIDDEN_DIM,
        embedding_dim=config.EMBEDDING_DIM,
        dropout=config.DROPOUT
    )
    per_model_kwargs = {
        "mlp": {"hidden_dims": config.HIDDEN_DIMS_MLP},
        "transformer": dict(
            heads=config.HEADS,
            depth=config.DEPTH,
            categories=tuple(train_dataset.df[c].nunique() for c in train_dataset.cat_cols)
        ),
        "rnn": {},
        "lstm": {"num_layers": config.NUM_LAYERS, "bidirectional": config.BIDIRECTIONAL},
    }
    model = registry[args.model](**shared_kwargs, **per_model_kwargs[args.model]).to(config.DEVICE)

    # === Optimizer and loss ===
    optimizer = torch.optim.Adam(model.parameters(), lr=config.LEARNING_RATE)
    criterion = torch.nn.MSELoss()

    # === Training / validation loop with early stopping on val loss ===
    num_epochs = config.EPOCHS
    best_val_loss = float('inf')
    total_train_time = 0
    patience, counter = 5, 0

    for epoch in range(num_epochs):
        train_loss, train_time = train_loop(model, train_loader, optimizer, criterion, experiment="regression")
        val_loss, labels, preds, val_infer_time = validate_loop(model, val_loader, criterion, experiment="regression")
        total_train_time += train_time

        history["train_loss"].append(train_loss)
        history["val_loss"].append(val_loss)

        print(f"Epoch {epoch+1}/{num_epochs} | Train Loss: {train_loss:.4f} | Val Loss: {val_loss:.4f}")

        if val_loss < best_val_loss:
            # New best epoch: snapshot its metrics and predictions.
            best_val_loss = val_loss
            counter = 0
            mse = mean_squared_error(labels, preds)
            mae = mean_absolute_error(labels, preds)
            r2 = r2_score(labels, preds)
            rmse = np.sqrt(mse)
            final_infer_time = val_infer_time
            final_labels, final_preds = labels, preds
        elif early_stop:
            counter += 1
            print(f"  No improvement in Loss. EarlyStopping counter: {counter}/{patience}")
            if counter >= patience:
                print("Early stopping triggered!")
                break

    # Model complexity (FLOPs / parameter count) on a single dummy batch.
    dummy_cat, dummy_cont, dummy_y = data_utils.generate_dummy_batch(train_dataset, batch_size=1, device=config.DEVICE)
    flops, params = profile(model, inputs=((dummy_cat, dummy_cont, dummy_y),), verbose=False)

    # === Metrics dict (all values from the best-loss epoch) ===
    metrics = {
        "mse": float(mse),
        "mae": float(mae),
        "r2": float(r2),
        "rmse": float(rmse),
        "train_time": float(total_train_time),
        "infer_time": float(final_infer_time),
        "params": int(params),
        "flops": int(flops),
    }

    return metrics, history, final_labels, final_preds, model.state_dict()
