import time
import numpy as np
from src.utils import data_utils
from src.data.dataset import ADHDDataset
from src.models.classification.dl_mobilenet import MobileNetClassifier
from src.models.classification.dl_alexnet import AlexNetClassifier
from src.models.classification.dl_resnet import ResNetClassifier
from src.models.classification.dl_transformer import TransformerClassifier
from src.models.regression.dl_mlp import MLPRegressor
from src.models.regression.dl_transformer import TransformerRegressor
from src.models.regression.dl_rnn import RNNRegressor
from src.models.regression.dl_lstm import LSTMRegressor
import torch
torch.set_num_threads(1)
import tqdm
import config.config_dl as config
from sklearn.metrics import (
    accuracy_score, classification_report, precision_score, 
    recall_score, f1_score, roc_auc_score, confusion_matrix, balanced_accuracy_score,
    mean_squared_error, mean_absolute_error, r2_score
)
from src.utils.visualize import plot_confusion_matrix, plot_metrics, plot_predicted_vs_true
from thop import profile

def check_keys(model, pretrained_state_dict):
    """Audit the overlap between a checkpoint and a model's state dict.

    Prints the missing / unused / used key counts and asserts that at least
    one checkpoint key actually matches the model.

    Args:
        model: initialized model exposing ``state_dict()``.
        pretrained_state_dict (dict): checkpoint parameter dict.

    Returns:
        True when at least one key was usable.

    Raises:
        AssertionError: if no checkpoint key matches the model.
    """
    checkpoint_keys = set(pretrained_state_dict)
    own_keys = set(model.state_dict())

    shared_keys = own_keys & checkpoint_keys
    extra_in_checkpoint = checkpoint_keys - own_keys
    # Ignore BatchNorm bookkeeping buffers when reporting missing entries.
    absent_keys = [k for k in own_keys - checkpoint_keys
                   if not k.endswith('num_batches_tracked')]

    if absent_keys:
        print('[Warning] missing keys: {}'.format(absent_keys))
        print('missing keys:{}'.format(len(absent_keys)))
    if extra_in_checkpoint:
        print('[Warning] unused_pretrained_keys: {}'.format(
            extra_in_checkpoint))
        print('unused checkpoint keys:{}'.format(
            len(extra_in_checkpoint)))
    print('used keys:{}'.format(len(shared_keys)))

    assert len(shared_keys) > 0, \
        'check_key load NONE from pretrained checkpoint'
    return True

def compare_state_dict_with_model(state_dict, model):
    """Print a side-by-side shape comparison of a checkpoint and a model.

    For every parameter name appearing in either the checkpoint or the
    initialized model, prints both shapes (or "MISSING") and whether they
    match. Debugging aid only; returns nothing.

    Args:
        state_dict (dict): checkpoint parameter dict (name -> tensor).
        model (nn.Module): initialized model instance.
    """
    named = dict(model.named_parameters())

    print(f"{'StateDict Layer':40} {'Shape':20} {'Model Layer':40} {'Shape':20} {'Match'}")
    print("=" * 130)

    # Union of names so parameters present on only one side still show up.
    for name in set(state_dict) | set(named):
        from_ckpt = state_dict.get(name)
        from_model = named.get(name)

        left = tuple(from_ckpt.shape) if from_ckpt is not None else "MISSING"
        right = tuple(from_model.shape) if from_model is not None else "MISSING"

        print(f"{name:40} {str(left):20} {name:40} {str(right):20} {left == right}")


def validate_loop(model, val_loader, criterion, experiment="classification"):
    """Run one evaluation pass over ``val_loader`` without gradient tracking.

    Args:
        model: network to evaluate; called as ``model(batch)``.
        val_loader: iterable of batches; each batch is a sequence of tensors
            where index 2 holds the targets.
        criterion: loss function applied to ``(outputs, targets)``.
        experiment: "classification" (argmax over dim 1 for predictions) or
            anything else for regression (raw outputs collected).

    Returns:
        classification: (avg_loss, accuracy, macro-F1, labels, preds, seconds)
        regression:     (avg_loss, labels, preds, seconds)
    """
    model.eval()
    running_loss = 0.0
    preds, labels = [], []
    t0 = time.time()

    progress = tqdm.tqdm(val_loader, desc="Validation", leave=False)
    with torch.no_grad():
        for batch in progress:
            batch = [item.to(config.DEVICE) for item in batch]
            outputs = model(batch)
            loss = criterion(outputs, batch[2])
            running_loss += loss.item()

            if experiment == "classification":
                preds.extend(outputs.argmax(dim=1).cpu().numpy())
            else:
                # Regression outputs are assumed 1-D already — TODO confirm
                # against the regressor models' forward().
                preds.extend(outputs.cpu().numpy())
            labels.extend(batch[2].cpu().numpy())

            progress.set_postfix(loss=loss.item())

    avg_loss = running_loss / len(val_loader)
    elapsed = time.time() - t0

    if experiment != "classification":
        return avg_loss, labels, preds, elapsed

    accuracy = accuracy_score(labels, preds)
    macro_f1 = f1_score(labels, preds, average="macro")
    return avg_loss, accuracy, macro_f1, labels, preds, elapsed


def validate_classification(args, val_loader, dataset: ADHDDataset, model_state_dict=None, categories=None):
    """Evaluate a classification model on ``val_loader``.

    Builds the model named by ``args.model``, optionally loads trained
    weights (non-strict), runs a validation pass and computes metrics.

    Args:
        args: namespace with a ``model`` attribute, one of
            "mobile", "alex", "res", "transformer".
        val_loader: validation DataLoader.
        dataset: dataset providing ``cat_cols`` / ``num_cols`` feature lists.
        model_state_dict: optional trained weights to load.
        categories: per-feature cardinalities, required by the transformer.

    Returns:
        (metrics dict, {"val_loss": ...}, confusion matrix)
    """
    model_dict = {
        "mobile": MobileNetClassifier,
        "alex": AlexNetClassifier,
        "res": ResNetClassifier,
        "transformer": TransformerClassifier
    }
    common_kwargs = dict(
        num_classes=2,
        num_cat_features=len(dataset.cat_cols),
        num_cont_features=len(dataset.num_cols),
        embedding_dim=config.EMBEDDING_DIM,
        hidden_dim=config.HIDDEN_DIM,
        dropout=config.DROPOUT
    )
    extra_kwargs = {
        "transformer": dict(
            heads=config.HEADS,
            depth=config.DEPTH,
            categories=categories
        ),
        "mobile": {},
        "alex": {},
        "res": {}
    }
    model = model_dict[args.model](**common_kwargs, **extra_kwargs[args.model]).to(config.DEVICE)

    # === Load trained weights ===
    if model_state_dict is not None:
        if args.model == "mobile":
            # MobileNet checkpoints predate the "embedding." submodule;
            # re-prefix the affected keys so they load into the new layout.
            new_state_dict = {
                ("embedding." + k if "cat_embeddings" in k or "cont_fc" in k else k): v
                    for k, v in model_state_dict.items()
                }
        else:
            new_state_dict = model_state_dict
        print("Loading model weights...")
        # Debug helpers: run once for a new model to see which saved keys
        # disagree with the definition.
        # check_keys(model, new_state_dict)
        # compare_state_dict_with_model(new_state_dict, model)
        model.load_state_dict(new_state_dict, strict=False)

    criterion = torch.nn.CrossEntropyLoss()

    val_loss, val_acc, val_f1, labels, preds, infer_time = validate_loop(
        model, val_loader, criterion, experiment="classification"
    )

    precision = precision_score(labels, preds, average="macro")
    recall = recall_score(labels, preds, average="macro")
    # NOTE(review): AUC is computed from hard predictions, not scores, which
    # underestimates the true ROC-AUC — confirm whether probabilities are
    # available upstream.
    try:
        roc_auc = roc_auc_score(labels, preds)
    except ValueError:
        # Only one class present in the validation labels — AUC is undefined;
        # fall through to the existing None handling in the metrics dict.
        roc_auc = None
    cpa = balanced_accuracy_score(labels, preds)

    metrics = {
        "acc": float(val_acc),
        "f1": float(val_f1),
        "precision": float(precision),
        "recall": float(recall),
        "roc_auc": float(roc_auc) if roc_auc is not None else None,
        "cpa": float(cpa),
        "infer_time": float(infer_time)
    }

    return metrics, {"val_loss": val_loss}, confusion_matrix(labels, preds)


def validate_regression(args, val_loader, dataset: ADHDDataset, model_state_dict=None, categories=None):
    """Evaluate a regression model on ``val_loader``.

    Instantiates the model selected by ``args.model``, optionally loads
    trained weights (non-strict), runs a validation pass and reports
    regression metrics.

    Args:
        args: namespace with a ``model`` attribute, one of
            "mlp", "transformer", "rnn", "lstm".
        val_loader: validation DataLoader.
        dataset: dataset providing ``cat_cols`` / ``num_cols`` feature lists.
        model_state_dict: optional trained weights to load.
        categories: per-feature cardinalities, required by the transformer.

    Returns:
        (metrics dict, {"val_loss": ...}, labels, preds)
    """
    # === Model ===
    registry = {
        "mlp": MLPRegressor,
        "transformer": TransformerRegressor,
        "rnn": RNNRegressor,
        "lstm": LSTMRegressor,
    }
    shared_kwargs = dict(
        num_cat_features=len(dataset.cat_cols),
        num_cont_features=len(dataset.num_cols),
        num_classes=1,
        hidden_dim=config.HIDDEN_DIM,
        embedding_dim=config.EMBEDDING_DIM,
        dropout=config.DROPOUT,
    )
    per_model_kwargs = {
        "mlp": {"hidden_dims": config.HIDDEN_DIMS_MLP},
        "transformer": dict(
            heads=config.HEADS,
            depth=config.DEPTH,
            categories=categories,
        ),
        "rnn": {},
        "lstm": {"num_layers": config.NUM_LAYERS, "bidirectional": config.BIDIRECTIONAL},
    }
    model = registry[args.model](**shared_kwargs, **per_model_kwargs[args.model]).to(config.DEVICE)

    # === Loss ===
    criterion = torch.nn.MSELoss()

    # === Load trained weights ===
    if model_state_dict is not None:
        print("Loading model weights...")
        # Debug helpers: run once for a new model to see which saved keys
        # disagree with the definition.
        # check_keys(model, model_state_dict)
        # compare_state_dict_with_model(model_state_dict, model)
        model.load_state_dict(model_state_dict, strict=False)

    # === Validation ===
    val_loss, labels, preds, infer_time = validate_loop(
        model, val_loader, criterion, experiment="regression"
    )

    mse = mean_squared_error(labels, preds)
    metrics = {
        "mse": float(mse),
        "mae": float(mean_absolute_error(labels, preds)),
        "r2": float(r2_score(labels, preds)),
        "rmse": float(np.sqrt(mse)),
        "infer_time": float(infer_time),
    }

    return metrics, {"val_loss": val_loss}, labels, preds
