import torch

import numpy as np
import torch.optim as optim
import torch.nn as nn

from tqdm import tqdm

from ecgcmr.utils.train_evaluate import r2_score_np, correlation_with_significance, cosine_with_warmup_scheduler
from ecgcmr.utils.misc import move_batch_to_device
from ecgcmr.utils.train_evaluate import RegressionHead, AttentionPool, ClassificationHead


from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.preprocessing import StandardScaler
from scipy.stats import pearsonr
from torchmetrics.functional import r2_score
from torch.optim.lr_scheduler import CosineAnnealingLR

from transformers import ViTMAEConfig, ViTMAEModel

from sklearn.metrics import (
    roc_auc_score,
    precision_recall_curve,
    auc,
    confusion_matrix,
    precision_recall_fscore_support
)


# LINEAR PROBE of ECG model
def train_regression_ecg(ecg_model, reg_head, train_loader, val_loader, device="cuda", epochs=10, lr=1e-4, weight_decay=1e-7):
    """
    Linear probe: train `reg_head` on mean-pooled embeddings of a frozen ECG encoder.

    Args:
        ecg_model: ECG encoder; all its parameters are frozen here.
        reg_head: small linear model or MLP trained on top of the embeddings.
        train_loader, val_loader: yield dicts with "ecg_data" and "task_labels".
        device: "cuda" or "cpu".
        epochs, lr, weight_decay: optimization knobs for the head only.

    Returns:
        (best_val_loss, best_metrics) where best_metrics holds per-dimension
        "r2", "corr", "p_values", "ci_lower", "ci_upper" from the epoch with
        the lowest validation loss (None if no epoch ran).
    """
    # Freeze the encoder: only the regression head receives gradient updates.
    for p in ecg_model.parameters():
        p.requires_grad = False

    ecg_model = ecg_model.to(device)
    reg_head = reg_head.to(device)

    mse = nn.MSELoss()
    optimizer = optim.AdamW(list(reg_head.parameters()), lr=lr, weight_decay=weight_decay)

    best_val_loss = float('inf')
    best_metrics = None

    def _pooled_embedding(signals):
        # Mean over the patch tokens; index 0 (CLS token) is dropped.
        hidden = ecg_model(pixel_values=signals, apply_masking=False, use_layernorm=False).last_hidden_state
        return torch.mean(hidden[:, 1:], dim=1)  # [B, D]

    for epoch in range(epochs):
        print(f"=== Regression Epoch {epoch+1}/{epochs} ===")

        reg_head.train()
        ecg_model.eval()

        running_train = 0
        for batch in tqdm(train_loader, desc="Training"):
            signals = batch["ecg_data"].to(device)
            targets = batch["task_labels"].to(device)

            optimizer.zero_grad()

            # Encoder is frozen, so its forward pass needs no autograd graph.
            with torch.no_grad():
                features = _pooled_embedding(signals)

            predictions = reg_head(features)
            loss = mse(predictions, targets)
            loss.backward()
            optimizer.step()

            running_train += loss.item() * signals.size(0)

        avg_train_loss = running_train / len(train_loader.dataset)

        reg_head.eval()
        ecg_model.eval()

        running_val = 0
        pred_chunks, target_chunks = [], []
        with torch.no_grad():
            for batch in tqdm(val_loader, desc="Validation"):
                signals = batch["ecg_data"].to(device)
                targets = batch["task_labels"].to(device)

                predictions = reg_head(_pooled_embedding(signals))
                loss = mse(predictions, targets)
                running_val += loss.item() * signals.size(0)

                pred_chunks.append(predictions.cpu())
                target_chunks.append(targets.cpu())

        avg_val_loss = running_val / len(val_loader.dataset)
        print(f"TrainLoss={avg_train_loss:.4f}, ValLoss={avg_val_loss:.4f}")

        # Per-dimension R^2 and Pearson correlation with significance.
        preds_np = torch.cat(pred_chunks, dim=0).numpy()       # [N_val, D_out]
        targets_np = torch.cat(target_chunks, dim=0).numpy()   # [N_val, D_out]

        r2 = r2_score_np(targets_np, preds_np)
        corr, p_values, ci_lower, ci_upper = correlation_with_significance(targets_np, preds_np)

        # Star-rating of each p-value, from most to least significant.
        significance_levels = [
            "***" if p < 1e-10 else "**" if p < 1e-8 else "*" if p < 1e-6 else ""
            for p in p_values
        ]

        print("R^2 per dimension:", r2)
        for i, (c, p, sig, lo, hi) in enumerate(zip(corr, p_values, significance_levels, ci_lower, ci_upper)):
            print(f"Dimension {i+1}: Correlation={c:.3f}, p-value={p:.6e} {sig}, CI=[{lo:.3f}, {hi:.3f}]")

        # Keep the metrics of the best epoch (lowest validation loss).
        if avg_val_loss < best_val_loss:
            best_val_loss = avg_val_loss
            best_metrics = {
                "r2": r2,
                "corr": corr,
                "p_values": p_values,
                "ci_lower": ci_lower,
                "ci_upper": ci_upper,
            }

    return best_val_loss, best_metrics


def train_ecg_regression_supervised(ecg_encoder, reg_head, train_loader, val_loader, device="cuda", epochs=10):
    """
    Supervised fine-tuning: ECG encoder AND regression head trained end-to-end.

    Uses AdamW with a cosine LR schedule plus warmup, and bfloat16 autocast
    for the forward pass only.

    Args:
        ecg_encoder: ECG encoder; all parameters are unfrozen and trained.
        reg_head:    A small linear model or MLP for regression.
        train_loader, val_loader: DataLoaders yielding dicts with "ecg_data"
            and "task_labels".
        device: "cuda" or "cpu".
        epochs: number of training epochs.

    Returns:
        (avg_val_loss, metrics) from the final epoch; metrics holds the
        per-dimension "r2", "corr", "p_values", "ci_lower", "ci_upper"
        (None if epochs == 0).
    """
    # Unfreeze the whole encoder for end-to-end fine-tuning.
    for param in ecg_encoder.parameters():
        param.requires_grad = True

    ecg_encoder = ecg_encoder.to(device)
    reg_head = reg_head.to(device)

    criterion = nn.MSELoss()

    base_lr = 3e-4
    warmup_epochs = 1
    min_lr = 1e-7

    optimizer = optim.AdamW(
        list(ecg_encoder.parameters()) + list(reg_head.parameters()),
        lr=base_lr,
        weight_decay=1e-6
    )

    scheduler = cosine_with_warmup_scheduler(optimizer, warmup_epochs, epochs, base_lr, min_lr)

    avg_val_loss = float("inf")
    metrics = None

    for epoch in range(epochs):
        print(f"=== Regression Epoch {epoch+1}/{epochs} ===")

        reg_head.train()
        ecg_encoder.train()

        total_train_loss = 0

        for batch in tqdm(train_loader, desc="Training"):
            ecg_data    = batch["ecg_data"].to(device)
            task_labels = batch["task_labels"].to(device)

            optimizer.zero_grad()

            # FIX: autocast wraps ONLY the forward pass; backward() and the
            # optimizer step run outside, as recommended by the PyTorch AMP
            # documentation (previously the whole loop was inside autocast).
            with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
                ecg_output = ecg_encoder(pixel_values=ecg_data, apply_masking=False, use_layernorm=True).last_hidden_state
                ecg_embeddings = ecg_output[:, 1:]  # drop CLS token
                global_embeddings = torch.mean(ecg_embeddings, dim=1)  # [B, D]

                preds = reg_head(global_embeddings)  # shape [batch_size, num_targets]
                loss = criterion(preds, task_labels)

            loss.backward()
            optimizer.step()

            total_train_loss += loss.item() * ecg_data.size(0)

        avg_train_loss = total_train_loss / len(train_loader.dataset)

        reg_head.eval()
        ecg_encoder.eval()

        total_val_loss = 0
        all_preds, all_targets = [], []

        with torch.no_grad():
            for batch in tqdm(val_loader, desc="Validation"):
                ecg_data    = batch["ecg_data"].to(device)
                task_labels = batch["task_labels"].to(device)

                with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
                    ecg_output = ecg_encoder(pixel_values=ecg_data, apply_masking=False, use_layernorm=True).last_hidden_state
                    ecg_embeddings = ecg_output[:, 1:]
                    global_embeddings = torch.mean(ecg_embeddings, dim=1)  # [B, D]

                    preds = reg_head(global_embeddings)
                    loss = criterion(preds, task_labels)

                total_val_loss += loss.item() * ecg_data.size(0)

                all_preds.append(preds.cpu())
                all_targets.append(task_labels.cpu())

        avg_val_loss = total_val_loss / len(val_loader.dataset)
        print(f"TrainLoss={avg_train_loss:.4f}, ValLoss={avg_val_loss:.4f}")

        # R^2 and correlation, per output dimension.
        all_preds = torch.cat(all_preds, dim=0)     # shape [N_val, num_targets]
        all_targets = torch.cat(all_targets, dim=0) # shape [N_val, num_targets]

        # Cast bfloat16 -> float32 first: numpy has no bfloat16 dtype.
        preds_np = all_preds.to(torch.float32).numpy()
        targets_np = all_targets.to(torch.float32).numpy()

        r2 = r2_score_np(targets_np, preds_np)
        corr, p_values, ci_lower, ci_upper = correlation_with_significance(targets_np, preds_np)

        # Star-rating of each p-value, from most to least significant.
        significance_levels = []
        for p in p_values:
            if p < 1e-10:
                significance_levels.append("***")
            elif p < 1e-8:
                significance_levels.append("**")
            elif p < 1e-6:
                significance_levels.append("*")
            else:
                significance_levels.append("")

        print("R^2 per dimension:", r2)
        for i, (c, p, sig, lo, hi) in enumerate(zip(corr, p_values, significance_levels, ci_lower, ci_upper)):
            print(f"Dimension {i+1}: Correlation={c:.3f}, p-value={p:.6e} {sig}, CI=[{lo:.3f}, {hi:.3f}]")

        # FIX: keep the metrics instead of discarding them; the final epoch's
        # values are returned to the caller (the original returned None).
        metrics = {
            "r2": r2,
            "corr": corr,
            "p_values": p_values,
            "ci_lower": ci_lower,
            "ci_upper": ci_upper,
        }

        scheduler.step()

    return avg_val_loss, metrics
        


def sklearn_regression(
    ecg_model,
    train_loader,
    val_loader,
    mean_train_labels_vol,
    std_train_labels_vol,
    device="cuda",
):
    """
    Replicates the sklearn regression approach used during contrastive training.
    1) Freezes the model
    2) Extracts embeddings in the same way
    3) Scales embeddings with StandardScaler
    4) Fits a LinearRegression on training set
    5) Evaluates on validation set

    Args:
        ecg_model (nn.Module): The pretrained/frozen ECG model (checkpoint loaded).
        train_loader (DataLoader): Dataloader for the train set.
        val_loader (DataLoader): Dataloader for the val set.
        mean_train_labels_vol (np.ndarray): per-dimension means used when the
            labels were normalized (for denormalizing predictions/targets).
        std_train_labels_vol (np.ndarray): per-dimension standard deviations.
        device (str): 'cuda' or 'cpu'.

    Returns:
        dict with:
          - 'regressor': trained LinearRegression
          - 'scaler': the fitted StandardScaler
          - 'metrics': dictionary with per-phenotype R2, Pearson, CI, p-value
            plus 'mean_R2' / 'mean_Pearson'
          - 'train_embeddings', 'train_targets': raw arrays of train data
          - 'val_embeddings',   'val_targets': raw arrays of val data
          - 'val_preds': predictions on the val set (normalized scale)
    """

    # 1) Freeze model
    ecg_model.eval()
    for param in ecg_model.parameters():
        param.requires_grad = False
    ecg_model = ecg_model.to(device)

    # 2) Prepare data containers
    train_embeds, train_targets = [], []
    val_embeds, val_targets = [], []

    # 2a) For consistent naming
    means_train = mean_train_labels_vol
    stds_train = std_train_labels_vol

    # 3) Embedding extraction helper
    def extract_embeddings(loader, embed_list, targ_list, desc=""):
        print(f"Extracting embeddings for {desc} set...")
        for batch in tqdm(loader):
            # NOTE(review): batches are unpacked as a 2-tuple here, unlike the
            # dict-style batches used elsewhere in this file — confirm the
            # loaders passed to this function really yield tuples.
            batch = move_batch_to_device(batch, device)
            ecg_data, task_labels = batch

            with torch.no_grad():
                # bfloat16 autocast to match the training-time precision.
                with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
                    out = ecg_model(pixel_values=ecg_data,
                                    apply_masking=False,
                                    use_layernorm=False).last_hidden_state
                    # Mean-pool the patch tokens; index 0 (CLS) is dropped.
                    emb = out[:, 1:].mean(dim=1).cpu().numpy()

            embed_list.append(emb)
            targ_list.append(task_labels.cpu().numpy())

    # 4) Extract for train and val
    extract_embeddings(train_loader, train_embeds, train_targets, desc="Train")
    extract_embeddings(val_loader, val_embeds, val_targets, desc="Val")

    train_embeds = np.concatenate(train_embeds, axis=0)
    train_targets = np.concatenate(train_targets, axis=0)
    val_embeds   = np.concatenate(val_embeds, axis=0)
    val_targets  = np.concatenate(val_targets, axis=0)

    # 5) StandardScaler fitted on train embeddings only (no val leakage)
    scaler = StandardScaler()
    train_embeds_scaled = scaler.fit_transform(train_embeds)
    val_embeds_scaled   = scaler.transform(val_embeds)

    # 6) Fit LinearRegression
    regressor = LinearRegression()
    regressor.fit(train_embeds_scaled, train_targets)

    # 7) Predictions
    val_preds   = regressor.predict(val_embeds_scaled)

    # 8) Denormalize predictions/targets back to the original label scale
    val_preds_denorm = val_preds * stds_train + means_train
    val_targets_denorm = val_targets * stds_train + means_train

    # 9) Evaluate per phenotype
    # shape of val_targets: [N, #phenotypes]
    # shape of val_preds:   [N, #phenotypes]
    num_phenotypes = val_targets_denorm.shape[1]
    phenotype_names = [f"Phenotype_{i+1}" for i in range(num_phenotypes)]

    metrics = {
        "val_r2": [],
        "val_pearson": [],
        "pearson_ci": [],
        "pearson_pvalue": []
    }

    for i in range(num_phenotypes):
        # R^2 (using torchmetrics)
        r2_val = r2_score(
            preds=torch.from_numpy(val_preds_denorm[:, i]),
            target=torch.from_numpy(val_targets_denorm[:, i])
        )

        # Pearson with confidence intervals
        res = pearsonr(val_preds_denorm[:, i], val_targets_denorm[:, i])
        pearson_corr = res.statistic
        p_value = res.pvalue

        # The confidence interval is in 'res.confidence_interval()' (SciPy >=1.10)
        ci = res.confidence_interval()
        ci_low, ci_high = ci.low, ci.high

        metrics["val_r2"].append(r2_val.item())
        metrics["val_pearson"].append(pearson_corr)
        metrics["pearson_ci"].append((ci_low, ci_high))
        metrics["pearson_pvalue"].append(p_value)

        print(f"{phenotype_names[i]} => R2: {r2_val.item():.5f}, "
            f"Pearson: {pearson_corr:.5f}, "
            f"CI=({ci_low:.3f},{ci_high:.3f}), "
            f"p={p_value:.6e}")

    metrics["mean_R2"] = float(np.mean(metrics["val_r2"]))
    metrics["mean_Pearson"] = float(np.mean(metrics["val_pearson"]))

    # FIX: the docstring promised this dict but the function returned None,
    # silently discarding the fitted regressor, scaler and every metric.
    return {
        "regressor": regressor,
        "scaler": scaler,
        "metrics": metrics,
        "train_embeddings": train_embeds,
        "train_targets": train_targets,
        "val_embeddings": val_embeds,
        "val_targets": val_targets,
        "val_preds": val_preds,
    }
    

def train_fine_tuning_with_grid_search(
    train_loader, 
    val_loader, 
    path_ecg_checkpoint, 
    device="cuda", 
    epochs=10,
    warmup_ratio=0.05,
    patience=5,  # Early stopping patience
):
    """
    Fine-tune ECG encoder + attention pooling + regression head, grid-searching
    over dropout, drop-path, weight decay and learning rate.

    Each hyperparameter combination loads a fresh encoder from
    `path_ecg_checkpoint`; early stopping on validation loss cuts
    unpromising runs short.

    Args:
        train_loader, val_loader: DataLoaders yielding dicts with "ecg_data"
            and "task_labels".
        path_ecg_checkpoint: HuggingFace checkpoint path of the ViTMAE encoder.
        device: "cuda" or "cpu".
        epochs: max epochs per configuration.
        warmup_ratio: fraction of epochs used for linear LR warm-up.
        patience: epochs without val-loss improvement before early stopping.

    Returns:
        (best_config, results) where `results` maps each
        (hidden_dropout_prob, drop_path_rate, weight_decay, lr) tuple to its
        best validation loss, and `best_config` is a dict with keys "params"
        and "val_loss" for the overall winner. (Previously every grid-search
        outcome was discarded and the function returned None.)
    """

    # Grid search parameters
    drop_path_rates = [0.0, 0.1, 0.2]
    hidden_dropout_probs = [0.0, 0.1, 0.2]
    weight_decays = [0.0, 0.1, 0.2]
    learning_rates = [1e-6, 3e-6, 1e-5, 3e-5]

    # FIX: record every configuration's outcome instead of discarding it.
    results = {}
    best_config = {"params": None, "val_loss": float("inf")}

    for hidden_dropout_prob in hidden_dropout_probs:
        for drop_path_rate in drop_path_rates:
            for weight_decay in weight_decays:
                for lr in learning_rates:
                    print(
                        f"DropPath={drop_path_rate}, Hidden DropOut Prob={hidden_dropout_prob} "
                        f"WeightDecay={weight_decay}, LR={lr}"
                    )
                    # Load a fresh ECG model per configuration so runs don't
                    # leak state into each other.
                    ecg_config = ViTMAEConfig.from_pretrained(pretrained_model_name_or_path=path_ecg_checkpoint)
                    ecg_config.hidden_dropout_prob = hidden_dropout_prob
                    ecg_config.drop_path_rate = drop_path_rate
                    
                    ecg_model = ViTMAEModel.from_pretrained(pretrained_model_name_or_path=path_ecg_checkpoint, config=ecg_config)
                    ecg_model = ecg_model.to(device)

                    # Unfreeze parameters in the ECG model
                    for param in ecg_model.parameters():
                        param.requires_grad = True

                    reg_head = RegressionHead(input_dim=512, output_dim=10).to(device)

                    # Define the attention pooling layer
                    attention_pool = AttentionPool(embedding_dim=ecg_model.config.hidden_size).to(device)

                    criterion = nn.MSELoss()

                    # Optimizer over encoder, pooling and head jointly
                    optimizer = optim.AdamW(
                        list(ecg_model.parameters()) + list(attention_pool.parameters()) + list(reg_head.parameters()),
                        lr=lr,
                        weight_decay=weight_decay,
                    )
                    
                    warmup_epochs = int(warmup_ratio * epochs)
                    scheduler = CosineAnnealingLR(optimizer, T_max=epochs - warmup_epochs)
                    
                    # Early stopping setup
                    best_val_loss = float("inf")
                    epochs_without_improvement = 0
                    
                    for epoch in range(epochs):
                        print(f"=== Fine-Tuning Epoch {epoch+1}/{epochs} ===")

                        ecg_model.train()
                        reg_head.train()
                        attention_pool.train()

                        total_train_loss = 0

                        for batch in tqdm(train_loader, desc="Training"):
                            ecg_data = batch["ecg_data"].to(device)
                            task_labels = batch["task_labels"].to(device)

                            optimizer.zero_grad()

                            # Forward pass through the ECG model
                            ecg_output = ecg_model(pixel_values=ecg_data, apply_masking=False, use_layernorm=False).last_hidden_state

                            # Attention pooling over all tokens
                            global_embeddings = attention_pool(ecg_output)  # [B, D]

                            # Regression head
                            preds = reg_head(global_embeddings)  # [B, 10]
                            loss = criterion(preds, task_labels)

                            # Backpropagation
                            loss.backward()
                            optimizer.step()

                            total_train_loss += loss.item() * ecg_data.size(0)

                        avg_train_loss = total_train_loss / len(train_loader.dataset)
                        
                        if epoch < warmup_epochs:
                            # Linear warm-up: manually override the LR
                            warmup_lr = lr * (epoch + 1) / warmup_epochs
                            for param_group in optimizer.param_groups:
                                param_group["lr"] = warmup_lr
                        else:
                            # Cosine annealing after warm-up
                            scheduler.step()
                            
                        # Evaluate on validation set
                        ecg_model.eval()
                        reg_head.eval()
                        attention_pool.eval()

                        total_val_loss = 0
                        all_preds, all_targets = [], []
                        with torch.no_grad():
                            for batch in tqdm(val_loader, desc="Validation"):
                                ecg_data = batch["ecg_data"].to(device)
                                task_labels = batch["task_labels"].to(device)

                                ecg_output = ecg_model(pixel_values=ecg_data, apply_masking=False, use_layernorm=False).last_hidden_state
                                global_embeddings = attention_pool(ecg_output)

                                preds = reg_head(global_embeddings)
                                loss = criterion(preds, task_labels)
                                total_val_loss += loss.item() * ecg_data.size(0)

                                all_preds.append(preds.cpu())
                                all_targets.append(task_labels.cpu())

                        avg_val_loss = total_val_loss / len(val_loader.dataset)
                        print(f"TrainLoss={avg_train_loss:.4f}, ValLoss={avg_val_loss:.4f}")
                        
                        # Compute R² and correlation coefficients
                        all_preds = torch.cat(all_preds, dim=0)   # [N_val, 10]
                        all_targets = torch.cat(all_targets, dim=0)  # [N_val, 10]

                        # Cast to float32 first: numpy has no bfloat16 dtype.
                        preds_np = all_preds.to(torch.float32).numpy()
                        targets_np = all_targets.to(torch.float32).numpy()
                        
                        r2 = r2_score_np(targets_np, preds_np)
                        corr, p_values, ci_lower, ci_upper = correlation_with_significance(targets_np, preds_np)
                        
                        # Star-rating of each p-value, most to least significant
                        significance_levels = []
                        for p in p_values:
                            if p < 1e-10:
                                significance_levels.append("***")
                            elif p < 1e-8:
                                significance_levels.append("**")
                            elif p < 1e-6:
                                significance_levels.append("*")
                            else:
                                significance_levels.append("")
                        
                        # Print results
                        print("R^2 per dimension:", r2)
                        for i, (c, p, sig, lo, hi) in enumerate(zip(corr, p_values, significance_levels, ci_lower, ci_upper)):
                            print(f"Dimension {i+1}: Correlation={c:.3f}, p-value={p:.6e} {sig}, CI=[{lo:.3f}, {hi:.3f}]")

                        # Early stopping
                        if avg_val_loss < best_val_loss:
                            best_val_loss = avg_val_loss
                            epochs_without_improvement = 0
                        else:
                            epochs_without_improvement += 1
                            if epochs_without_improvement >= patience:
                                print(f"Early stopping triggered at epoch {epoch+1}.")
                                break

                    # Record this configuration's best validation loss.
                    params = (hidden_dropout_prob, drop_path_rate, weight_decay, lr)
                    results[params] = best_val_loss
                    if best_val_loss < best_config["val_loss"]:
                        best_config = {"params": params, "val_loss": best_val_loss}

    return best_config, results
        

def grid_search_regression_ecg(
    ecg_model_path,
    train_loader,
    val_loader,
    device="cuda",
    epochs=10,
    lr_values=(1e-4, 3e-4, 1e-3),
    wd_values=(1e-7, 1e-6, 1e-5)
):
    """
    Grid search over learning rate & weight decay for the linear-probe
    regression head on a frozen ECG encoder loaded from 'ecg_model_path'.

    The encoder and regression head are re-instantiated for every (lr, wd)
    combination so each run starts from a fresh state.

    Args:
        ecg_model_path: HuggingFace checkpoint path of the ViTMAE encoder.
        train_loader, val_loader: passed through to `train_regression_ecg`.
        device, epochs: training knobs.
        lr_values, wd_values: candidate values. Defaults are tuples — FIX for
            the mutable-default-argument pitfall of the original lists.

    Returns:
        dict mapping (lr, wd) -> best validation loss for that combination.
    """
    best_config = None
    best_val_loss = float('inf')
    best_val_metrics = None
    all_results = {}

    for lr in lr_values:
        for wd in wd_values:
            print(f"\n=== GRID SEARCH: LR={lr}, WeightDecay={wd} ===")
            
            # 1. Re-load the frozen encoder each time to ensure a fresh start.
            #    (ViTMAEConfig/ViTMAEModel come from the module-level import;
            #    the redundant function-local import was removed.)
            ecg_encoder_config = ViTMAEConfig.from_pretrained(ecg_model_path)
            ecg_encoder = ViTMAEModel.from_pretrained(ecg_model_path, config=ecg_encoder_config)

            # 2. Create a fresh RegressionHead
            reg_head = RegressionHead(input_dim=512, output_dim=10)

            # 3. Train the linear probe with this (lr, wd) combination
            final_val_loss, final_metrics = train_regression_ecg(
                ecg_model=ecg_encoder,
                reg_head=reg_head,
                train_loader=train_loader,
                val_loader=val_loader,
                device=device,
                epochs=epochs,
                lr=lr,
                weight_decay=wd
            )
            
            # store the results
            all_results[(lr, wd)] = final_val_loss

            # track the best combination so far
            if final_val_loss < best_val_loss:
                best_val_loss = final_val_loss
                best_config = (lr, wd)
                best_val_metrics = final_metrics
    
    # FIX: guard against empty grids — the original crashed on
    # best_config[0] when no (lr, wd) combination was evaluated.
    if best_config is not None:
        print(f"\nBEST CONFIG: LR={best_config[0]}, WD={best_config[1]} with ValLoss={best_val_loss:.4f}")
        print("Best metrics (at best config):")

        if best_val_metrics:
            print("R^2 per dimension:", best_val_metrics["r2"])
            print("Corr per dimension:", best_val_metrics["corr"])

    return all_results

def train_logistic_regression(
    ecg_model,
    train_loader,
    val_loader,
    device="cuda",
    pos_weight_tensor=None,
    disease_names=None,
    threshold=0.5
):
    """
    Trains and evaluates one LogisticRegression classifier per disease on
    embeddings extracted from a frozen ECG encoder.

    FIX: the previous docstring wrongly described "4 separate XGBoost models"
    tuned with GridSearchCV — the code fits plain per-disease sklearn
    LogisticRegression models with a fixed C=0.1 and no hyperparameter search.

    Arguments:
    ---------
    ecg_model           : A frozen ECG encoder model.
    train_loader, val_loader : Data loaders that yield batches with
                               "ecg_data" (tensor) and "disease_labels"
                               (tensor of shape [batch_size, num_diseases],
                               e.g. MI, CM, AF, HF).
    device              : "cuda" or "cpu".
    pos_weight_tensor   : Tensor of shape [num_diseases] with positive-class
                          weights per disease (mapped to sklearn class_weight
                          for class 1). Defaults to all-ones.
    disease_names       : List of disease names, e.g. ["MI", "CM", "AF", "HF"].
    threshold           : Probability threshold for converting probabilities to binary predictions.

    Returns:
        dict with "models" (one fitted LogisticRegression per disease),
        per-disease "roc_auc", "pr_auc", "precision", "recall", "f1",
        "confusion_matrices", and aggregate "mean_roc_auc" / "mean_pr_auc".
        (Previously all models and metrics were discarded.)
    """

    # 1) Freeze ecg_model (no gradient updates) and move to device
    ecg_model.eval()
    for param in ecg_model.parameters():
        param.requires_grad = False
    ecg_model = ecg_model.to(device)

    # 2) Collect embeddings & labels from the entire TRAIN set
    train_embeddings = []
    train_labels = []

    with torch.no_grad():
        with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
            for batch in tqdm(train_loader, desc="Extracting train embeddings"):
                ecg_data = batch["ecg_data"].to(device)
                disease_labels = batch["disease_labels"].to(device)

                # Forward pass through the frozen encoder
                output = ecg_model(
                    pixel_values=ecg_data,
                    apply_masking=False,
                    use_layernorm=False
                )
                # Mean-pool the hidden states, skipping the first (CLS) token
                embeddings = torch.mean(output.last_hidden_state[:, 1:], dim=1)

                # Move to CPU numpy
                train_embeddings.append(embeddings.cpu().numpy())
                train_labels.append(disease_labels.cpu().numpy())

    X_train = np.concatenate(train_embeddings, axis=0)  # shape: [N_train, embedding_dim]
    Y_train = np.concatenate(train_labels, axis=0)      # shape: [N_train, num_diseases]

    # 3) Collect embeddings & labels from the entire VAL set
    val_embeddings = []
    val_labels = []

    with torch.no_grad():
        with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
            for batch in tqdm(val_loader, desc="Extracting val embeddings"):
                ecg_data = batch["ecg_data"].to(device)
                disease_labels = batch["disease_labels"].to(device)

                output = ecg_model(
                    pixel_values=ecg_data,
                    apply_masking=False,
                    use_layernorm=False
                )
                embeddings = torch.mean(output.last_hidden_state[:, 1:], dim=1)

                val_embeddings.append(embeddings.cpu().numpy())
                val_labels.append(disease_labels.cpu().numpy())

    X_val = np.concatenate(val_embeddings, axis=0)  # shape: [N_val, embedding_dim]
    Y_val = np.concatenate(val_labels, axis=0)      # shape: [N_val, num_diseases]

    # 4) Scaling deliberately disabled (kept for reference)
    # scaler = StandardScaler()
    # X_train_scaled = scaler.fit_transform(X_train)
    # X_val_scaled   = scaler.transform(X_val)

    num_diseases = Y_train.shape[1]

    # If no pos_weight is provided, default to 1.0 for all diseases
    if pos_weight_tensor is not None:
        pos_weight_tensor = pos_weight_tensor.cpu()
    else:
        pos_weight_tensor = torch.ones(num_diseases)

    # 5) Fit one logistic-regression classifier per disease
    models = []

    for i in range(num_diseases):
        y_train_i = Y_train[:, i]
        weight_for_pos = pos_weight_tensor[i].item()

        log_res = LogisticRegression(
            class_weight={0: 1.0, 1: weight_for_pos},
            max_iter=2000,
            C=0.1,
        )

        # Fit the i-th disease's classifier on its binary label column
        log_res.fit(X_train, y_train_i)
        models.append(log_res)

    # 6) Evaluate each model on the validation set
    auc_vals = []
    pr_auc_vals = []
    precision_vals = []
    recall_vals = []
    f1_vals = []
    conf_matrices = []

    for i in range(num_diseases):
        y_scores = models[i].predict_proba(X_val)[:, 1]
        y_true = Y_val[:, i]

        # Skip metrics if no positive or no negative in ground truth
        # (ROC/PR AUC are undefined for a single-class label vector)
        if np.all(y_true == 0) or np.all(y_true == 1):
            auc_vals.append(np.nan)
            pr_auc_vals.append(np.nan)
            precision_vals.append(np.nan)
            recall_vals.append(np.nan)
            f1_vals.append(np.nan)
            conf_matrices.append(None)
            continue

        # 1) ROC AUC
        rocauc = roc_auc_score(y_true, y_scores)
        auc_vals.append(rocauc)

        # 2) Precision-Recall AUC
        prec_curve, rec_curve, _ = precision_recall_curve(y_true, y_scores)
        prauc = auc(rec_curve, prec_curve)
        pr_auc_vals.append(prauc)

        # 3) Precision, Recall, F1 using the threshold
        y_pred = (y_scores >= threshold).astype(int)
        prec, rec, f1, _ = precision_recall_fscore_support(
            y_true, y_pred, average="binary", zero_division=0
        )
        precision_vals.append(prec)
        recall_vals.append(rec)
        f1_vals.append(f1)

        # 4) Confusion Matrix
        cm = confusion_matrix(y_true, y_pred)
        conf_matrices.append(cm)

    print("\n=== Validation Results ===")
    if disease_names and len(disease_names) == num_diseases:
        for i, name in enumerate(disease_names):
            print(f"\nDisease: {name}")
            print(f"  ROC AUC     : {auc_vals[i]:.4f}")
            print(f"  PR AUC      : {pr_auc_vals[i]:.4f}")
            print(f"  Precision   : {precision_vals[i]:.4f}")
            print(f"  Recall      : {recall_vals[i]:.4f}")
            print(f"  F1          : {f1_vals[i]:.4f}")
            print(f"  Confusion Matrix:\n{conf_matrices[i]}")
    else:
        for i in range(num_diseases):
            print(f"\nDisease {i+1}")
            print(f"  ROC AUC     : {auc_vals[i]:.4f}")
            print(f"  PR AUC      : {pr_auc_vals[i]:.4f}")
            print(f"  Precision   : {precision_vals[i]:.4f}")
            print(f"  Recall      : {recall_vals[i]:.4f}")
            print(f"  F1          : {f1_vals[i]:.4f}")
            print(f"  Confusion Matrix:\n{conf_matrices[i]}")

    # Mean metrics across diseases (ignoring NaNs)
    valid_auc = [v for v in auc_vals if not np.isnan(v)]
    mean_auc = np.mean(valid_auc) if len(valid_auc) > 0 else float('nan')
    valid_pr_auc = [v for v in pr_auc_vals if not np.isnan(v)]
    mean_pr_auc = np.mean(valid_pr_auc) if len(valid_pr_auc) > 0 else float('nan')

    print(f"\nMean ROC AUC across diseases  : {mean_auc:.4f}")
    print(f"Mean PR AUC across diseases   : {mean_pr_auc:.4f}")

    # FIX: return the fitted models and metrics instead of discarding them.
    return {
        "models": models,
        "roc_auc": auc_vals,
        "pr_auc": pr_auc_vals,
        "precision": precision_vals,
        "recall": recall_vals,
        "f1": f1_vals,
        "confusion_matrices": conf_matrices,
        "mean_roc_auc": mean_auc,
        "mean_pr_auc": mean_pr_auc,
    }

def _binary_label_metrics(y_true, y_prob, threshold):
    """Compute (roc_auc, pr_auc, precision, recall, f1, confusion_matrix) for one binary label.

    Returns NaNs (and None for the confusion matrix) when only one class is
    present in y_true, since ROC/PR metrics are undefined in that case.
    """
    if np.all(y_true == 0) or np.all(y_true == 1):
        return float('nan'), float('nan'), float('nan'), float('nan'), float('nan'), None

    roc = roc_auc_score(y_true, y_prob)

    # PR-AUC from the full precision-recall curve.
    prec_curve, rec_curve, _ = precision_recall_curve(y_true, y_prob)
    pr_auc_val = auc(rec_curve, prec_curve)

    # Point metrics at the given probability cutoff.
    preds = (y_prob >= threshold).astype(int)
    precision, recall, f1, _ = precision_recall_fscore_support(
        y_true, preds, average="binary", zero_division=0
    )
    cm = confusion_matrix(y_true, preds)
    return roc, pr_auc_val, precision, recall, f1, cm


def train_classification(
    ecg_model,
    cls_head,
    train_loader,
    val_loader,
    device="cuda",
    epochs=10,
    pos_weight_tensor=None,
    disease_names=None,
    threshold=0.8,
    lr=1e-3
):
    """
    Linear probe: train a classification head on top of a frozen ECG encoder.

    ecg_model: Frozen ECG encoder (its parameters are never updated)
    cls_head: classification head => output shape [batch, #diseases_of_interest]
    train_loader, val_loader => yield dict with 'ecg_data','disease_labels' at least
    device: target device string; tensors and autocast follow it
    epochs: number of passes over train_loader
    pos_weight_tensor => shape [#diseases_of_interest], or None
    disease_names: optional list of names for the printed per-disease report
    threshold: probability cutoff for precision/recall/F1/confusion matrix
    lr: Adam learning rate for the head
    """
    # Freeze ecg_model so only the head is trained.
    ecg_model.eval()
    for param in ecg_model.parameters():
        param.requires_grad = False
    ecg_model = ecg_model.to(device)

    cls_head = cls_head.to(device)

    # Optional positive-class reweighting for imbalanced multi-label targets.
    if pos_weight_tensor is not None:
        pos_weight_tensor = pos_weight_tensor.to(device)
        classification_criterion = nn.BCEWithLogitsLoss(pos_weight=pos_weight_tensor)
    else:
        classification_criterion = nn.BCEWithLogitsLoss()

    optimizer = optim.Adam(
        cls_head.parameters(),
        lr=lr,
        weight_decay=1e-4
    )

    # FIX: autocast device type was hard-coded to "cuda" even though `device`
    # is a parameter; derive it so device="cpu" also works.
    autocast_device = "cuda" if "cuda" in str(device) else "cpu"

    for epoch in range(epochs):
        cls_head.train()
        total_train_loss = 0.0

        # TRAIN — autocast wraps the forward pass only; backward()/step() run
        # outside it, as recommended by the PyTorch AMP documentation.
        for batch in tqdm(train_loader, desc="Extracting train embeddings"):
            ecg_data       = batch["ecg_data"].to(device)
            disease_labels = batch["disease_labels"].to(device)
            optimizer.zero_grad()

            with torch.autocast(device_type=autocast_device, dtype=torch.bfloat16):
                # Forward with frozen encoder; mean-pool the sequence tokens,
                # dropping index 0 (presumably a CLS token — TODO confirm).
                with torch.no_grad():
                    output = ecg_model(pixel_values=ecg_data, apply_masking=False, use_layernorm=False)
                    # output.last_hidden_state => shape [batch, seq_len, hidden_dim]
                    embeddings = torch.mean(output.last_hidden_state[:, 1:], dim=1)

                logits = cls_head(embeddings)  # [batch, #diseases_of_interest]
                loss = classification_criterion(logits, disease_labels)

            loss.backward()
            optimizer.step()

            # Weight by batch size so the epoch average is per-sample.
            total_train_loss += loss.item() * ecg_data.size(0)

        avg_train_loss = total_train_loss / len(train_loader.dataset)

        # VAL — collect logits/targets for the whole validation set.
        cls_head.eval()
        total_val_loss = 0.0
        all_logits, all_targets = [], []

        with torch.no_grad():
            with torch.autocast(device_type=autocast_device, dtype=torch.bfloat16):
                for batch in tqdm(val_loader, desc="Extracting val embeddings"):
                    ecg_data       = batch["ecg_data"].to(device)
                    disease_labels = batch["disease_labels"].to(device)

                    output = ecg_model(pixel_values=ecg_data, apply_masking=False, use_layernorm=False)
                    embeddings = torch.mean(output.last_hidden_state[:, 1:], dim=1)

                    logits = cls_head(embeddings)
                    loss   = classification_criterion(logits, disease_labels)
                    total_val_loss += loss.item() * ecg_data.size(0)

                    all_logits.append(logits.cpu())
                    all_targets.append(disease_labels.cpu())

        avg_val_loss = total_val_loss / len(val_loader.dataset)

        # Metrics Calculation — cast to float32 first (logits may be bfloat16
        # from autocast, which numpy cannot consume directly).
        all_logits_tensor = torch.cat(all_logits, dim=0).to(torch.float32)   # [N_val, #diseases]
        all_targets_tensor = torch.cat(all_targets, dim=0).to(torch.float32) # [N_val, #diseases]

        probs = torch.sigmoid(all_logits_tensor).numpy()   # Convert logits to probabilities
        targets_np = all_targets_tensor.numpy()            # Ground truth

        num_diseases = probs.shape[1]
        auc_vals, pr_auc_vals, precision_vals, recall_vals, f1_vals, conf_matrices = [], [], [], [], [], []

        for i in range(num_diseases):
            roc, pr, prec, rec, f1, cm = _binary_label_metrics(
                targets_np[:, i], probs[:, i], threshold
            )
            auc_vals.append(roc)
            pr_auc_vals.append(pr)
            precision_vals.append(prec)
            recall_vals.append(rec)
            f1_vals.append(f1)
            conf_matrices.append(cm)

        # Display Results
        print(f"Epoch {epoch+1}/{epochs}: TrainLoss={avg_train_loss:.4f}, ValLoss={avg_val_loss:.4f}")

        # FIX: also require len(disease_names) == num_diseases (previously an
        # IndexError risk when the name list was longer than the output width);
        # matches the check used by the standalone evaluation report.
        if disease_names and len(disease_names) == num_diseases:
            names = disease_names
        else:
            names = [f"Disease-{i}" for i in range(num_diseases)]

        print("\nMetrics per Disease:")
        for i, name in enumerate(names):
            print(f"  {name}: AUC={auc_vals[i]:.4f}, PR-AUC={pr_auc_vals[i]:.4f}, "
                  f"Precision={precision_vals[i]:.4f}, Recall={recall_vals[i]:.4f}, F1={f1_vals[i]:.4f}")
            print(f"    Confusion Matrix:\n{conf_matrices[i]}")

        # Calculate Mean Metrics, ignoring diseases whose metrics were NaN
        # (single-class labels in the validation set).
        valid_aucs = [v for v in auc_vals if not np.isnan(v)]
        mean_auc = np.mean(valid_aucs) if valid_aucs else 0.0
        print(f"\nMean AUC: {mean_auc:.4f}")

        valid_pr_aucs = [v for v in pr_auc_vals if not np.isnan(v)]
        mean_pr_auc = np.mean(valid_pr_aucs) if valid_pr_aucs else 0.0
        print(f"Mean PR-AUC: {mean_pr_auc:.4f}")

    print("Training complete.")