import torch
import os
import numpy as np
import pandas as pd
import torch.nn.functional as F

from tqdm import tqdm
from IPython.display import HTML
from matplotlib.animation import FuncAnimation
from scipy.stats import t
import matplotlib.pyplot as plt

from transformers import VideoMAEModel, VideoMAEConfig, ViTMAEConfig, ViTMAEModel
from ecgcmr.multimodal.multimodal_models.Projections import ProjectionHeadSimple
from ecgcmr.multimodal.multimodal_utils.form_ed_ed_patches import ECGHeartBeatRepresentation


def plot_mri_as_gif(mri_data):
    """Render a stack of 2-D frames as an inline HTML/JS animation.

    Args:
        mri_data: array-like of shape [num_frames, H, W] (indexable by frame,
            with a ``.shape`` attribute) shown frame-by-frame in grayscale.

    Returns:
        IPython.display.HTML: the JS-HTML animation, ready for notebook display.
    """
    fig, axis = plt.subplots()

    def _render(frame):
        # Redraw the axis from scratch for every frame of the animation.
        axis.clear()
        axis.imshow(mri_data[frame], cmap="gray")
        axis.set_title(f"Frame {frame}")
        axis.axis("off")

    animation = FuncAnimation(fig, _render, frames=mri_data.shape[0], interval=100)
    # Close the figure so the static plot is not rendered alongside the animation.
    plt.close(fig)
    return HTML(animation.to_jshtml())

def save_embeddings(output_path, embeddings_dict):
    """Persist global/local embeddings (torch) and eids (numpy) to disk.

    Args:
        output_path (str): Directory to write into (created if missing).
        embeddings_dict (dict): Must contain the four tensor keys
            'ecg_embeddings_global', 'mri_embeddings_global',
            'ecg_embeddings_local', 'mri_embeddings_local', plus 'eids'.
    """
    os.makedirs(output_path, exist_ok=True)
    tensor_keys = (
        "ecg_embeddings_global",
        "mri_embeddings_global",
        "ecg_embeddings_local",
        "mri_embeddings_local",
    )
    # Each tensor is written to '<key>.pt' inside output_path.
    for key in tensor_keys:
        torch.save(embeddings_dict[key], os.path.join(output_path, f"{key}.pt"))
    np.save(os.path.join(output_path, "eids.npy"), embeddings_dict["eids"])


def load_embeddings(output_path):
    """Load embeddings and eids previously written by ``save_embeddings``.

    Args:
        output_path (str): Directory containing the '.pt' tensor files and
            'eids.npy'.

    Returns:
        dict: the four embedding tensors keyed as on disk, plus 'eids' as a
            Python list.
    """
    tensor_keys = (
        "ecg_embeddings_global",
        "mri_embeddings_global",
        "ecg_embeddings_local",
        "mri_embeddings_local",
    )
    result = {
        key: torch.load(os.path.join(output_path, f"{key}.pt"))
        for key in tensor_keys
    }
    # eids may contain arbitrary Python objects, hence allow_pickle.
    result["eids"] = np.load(
        os.path.join(output_path, "eids.npy"), allow_pickle=True
    ).tolist()
    return result


def load_checkpoints(cfg, path_ecg, path_mri, path_model_checkpoint, model_type):
    """
    Load model checkpoints for ECG and MRI encoders, along with projection heads
    and the LayerNorm "reducer" modules for the global branch.

    Args:
        cfg (DictConfig): Configuration object (reads cfg.models.projection.*).
        path_ecg (str): Path to the ECG encoder checkpoint.
        path_mri (str): Path to the MRI encoder checkpoint.
        path_model_checkpoint (str): Path to the model checkpoint.
        model_type (str): Type of model to load ('global', 'local', 'both').

    Returns:
        dict: Encoders plus the projection heads / layer norms for the
            requested branch(es); entries for a branch that was not requested
            are None.

    Raises:
        ValueError: If model_type is not one of 'global', 'local', 'both'.
    """
    # Fail fast instead of silently returning all-None modules.
    if model_type not in ('global', 'local', 'both'):
        raise ValueError(
            f"Unknown model_type '{model_type}', expected 'global', 'local' or 'both'."
        )

    # Load ECG Encoder
    ecg_config = ViTMAEConfig.from_pretrained(pretrained_model_name_or_path=path_ecg)
    ecg_model = ViTMAEModel.from_pretrained(pretrained_model_name_or_path=path_ecg, config=ecg_config)

    # Load MRI Encoder
    mri_config = VideoMAEConfig.from_pretrained(pretrained_model_name_or_path=path_mri)
    mri_model = VideoMAEModel.from_pretrained(pretrained_model_name_or_path=path_mri, config=mri_config)

    checkpoint = torch.load(path_model_checkpoint, map_location="cpu")

    def _load_projection(input_dim, state_key):
        # Build a projection head and restore its weights from the checkpoint.
        head = ProjectionHeadSimple(
            input_dim=input_dim,
            hidden_dim=cfg.models.projection.hidden_dim,
            output_dim=cfg.models.projection.d_contrastive,
        )
        head.load_state_dict(checkpoint[state_key])
        return head

    def _load_layernorm(hidden_size, prefix):
        # Restore a LayerNorm stored inside the checkpoint's "reducer" entry.
        norm = torch.nn.LayerNorm(hidden_size, dtype=torch.float32)
        norm.load_state_dict({
            "weight": checkpoint["reducer"][f"{prefix}.weight"],
            "bias": checkpoint["reducer"][f"{prefix}.bias"],
        })
        return norm

    projection_ecg_global = projection_ecg_local = None
    projection_mri_global = projection_mri_local = None
    layernorm_ecg_global = layernorm_mri_global = None

    if model_type in ('global', 'both'):
        projection_ecg_global = _load_projection(ecg_config.hidden_size, "projection_ecg_global")
        # NOTE: MRI modules are stored under "image" keys in the checkpoint.
        projection_mri_global = _load_projection(mri_config.hidden_size, "projection_image_global")
        layernorm_ecg_global = _load_layernorm(ecg_config.hidden_size, "ecg_layernorm_global")
        layernorm_mri_global = _load_layernorm(mri_config.hidden_size, "image_layernorm_global")

    if model_type in ('local', 'both'):
        projection_ecg_local = _load_projection(ecg_config.hidden_size, "projection_ecg_local")
        projection_mri_local = _load_projection(mri_config.hidden_size, "projection_image_local")

    return {
        "ecg_model": ecg_model,
        "mri_model": mri_model,
        "projection_ecg_global": projection_ecg_global,
        "projection_mri_global": projection_mri_global,
        "layernorm_ecg_global": layernorm_ecg_global,
        "layernorm_mri_global": layernorm_mri_global,
        "projection_ecg_local": projection_ecg_local,
        "projection_mri_local": projection_mri_local,
    }

def load_checkpoint_lightning(main_path, model_type, cfg):
    """
    Load the full Lightning model checkpoint along with ECG and MRI encoders.

    Args:
        main_path (str): Directory containing 'hf_ecg_model', 'hf_mri_model'
            and 'checkpoints/last.ckpt'.
        model_type (str): Type of model to load ('global', 'local', or 'both').
        cfg: Configuration object containing model hyperparameters
            (reads cfg.models.projection.*).

    Returns:
        dict: Loaded models and modules. Entries for a branch that was not
            requested (e.g. the local heads when model_type='global') are None.

    Raises:
        ValueError: If model_type is not one of 'global', 'local', 'both'.
    """
    # Fail fast on an unsupported model_type instead of returning all-None heads.
    if model_type not in ('global', 'local', 'both'):
        raise ValueError(
            f"Unknown model_type '{model_type}', expected 'global', 'local' or 'both'."
        )

    # Paths to checkpoints
    path_ecg = f"{main_path}/hf_ecg_model"
    path_mri = f"{main_path}/hf_mri_model"
    checkpoint_dir = f"{main_path}/checkpoints"
    path_model_checkpoint = os.path.join(checkpoint_dir, "last.ckpt")

    # Load ECG Encoder
    ecg_config = ViTMAEConfig.from_pretrained(pretrained_model_name_or_path=path_ecg)
    ecg_model = ViTMAEModel.from_pretrained(pretrained_model_name_or_path=path_ecg, config=ecg_config)

    # Load MRI Encoder
    mri_config = VideoMAEConfig.from_pretrained(pretrained_model_name_or_path=path_mri)
    mri_model = VideoMAEModel.from_pretrained(pretrained_model_name_or_path=path_mri, config=mri_config)

    # Load Main Checkpoint
    checkpoint = torch.load(path_model_checkpoint, map_location="cpu")
    state_dict = checkpoint["state_dict"]

    def _sub_state_dict(prefix):
        # Extract the sub-module state dict, stripping the Lightning prefix.
        # Slicing by len(prefix) is safer than str.replace, which would also
        # rewrite any later occurrence of the prefix inside a key.
        return {k[len(prefix):]: v for k, v in state_dict.items() if k.startswith(prefix)}

    def _load_projection(input_dim, prefix):
        # Build a projection head and restore its weights from the state dict.
        head = ProjectionHeadSimple(
            input_dim=input_dim,
            hidden_dim=cfg.models.projection.hidden_dim,
            output_dim=cfg.models.projection.d_contrastive,
        )
        head.load_state_dict(_sub_state_dict(prefix))
        return head

    def _load_layernorm(hidden_size, prefix):
        # Restore a LayerNorm stored under the "reducer." namespace.
        norm = torch.nn.LayerNorm(hidden_size, dtype=torch.float32)
        norm.load_state_dict({
            "weight": state_dict[f"{prefix}.weight"],
            "bias": state_dict[f"{prefix}.bias"],
        })
        return norm

    projection_heads = {}
    layernorms = {}

    if model_type in ('global', 'both'):
        projection_heads['ecg_global'] = _load_projection(ecg_config.hidden_size, "projection_ecg_global.")
        # NOTE: MRI modules are stored under "image" keys in the checkpoint.
        projection_heads['mri_global'] = _load_projection(mri_config.hidden_size, "projection_image_global.")
        layernorms['ecg_global'] = _load_layernorm(ecg_config.hidden_size, "reducer.ecg_layernorm_global")
        layernorms['mri_global'] = _load_layernorm(mri_config.hidden_size, "reducer.image_layernorm_global")

    if model_type in ('local', 'both'):
        projection_heads['ecg_local'] = _load_projection(ecg_config.hidden_size, "projection_ecg_local.")
        projection_heads['mri_local'] = _load_projection(mri_config.hidden_size, "projection_image_local.")

    # BUGFIX: use .get() so a missing branch yields None instead of raising
    # KeyError when model_type is 'global' or 'local' (not 'both').
    return {
        "ecg_model": ecg_model,
        "mri_model": mri_model,
        "projection_ecg_global": projection_heads.get("ecg_global"),
        "projection_mri_global": projection_heads.get("mri_global"),
        "layernorm_ecg_global": layernorms.get("ecg_global"),
        "layernorm_mri_global": layernorms.get("mri_global"),
        "projection_ecg_local": projection_heads.get("ecg_local"),
        "projection_mri_local": projection_heads.get("mri_local"),
    }
    
def calculate_similarity_global(z1, z2):
    """
    Calculate the cosine-similarity matrix for global embeddings.

    Args:
        z1 (torch.Tensor): global embeddings [N, D].
        z2 (torch.Tensor): global embeddings [M, D].

    Returns:
        torch.Tensor: Similarity matrix [N, M].
    """
    # L2-normalise both sides so the dot product becomes cosine similarity.
    queries = F.normalize(z1, p=2, dim=-1)
    keys = F.normalize(z2, p=2, dim=-1)
    return queries @ keys.T

def calculate_similarity_local_full(z1, z2):
    """
    Compute the full [N, M, T1, T2] local similarity without aggregation.

    Args:
        z1 (torch.Tensor): [N, T1, D]
        z2 (torch.Tensor): [M, T2, D]

    Returns:
        torch.Tensor: [N, M, T1, T2] cosine similarity for each pair (n, m)
            and each time-step pair (t1, t2).
    """
    queries = F.normalize(z1, p=2, dim=-1)  # [N, T1, D]
    keys = F.normalize(z2, p=2, dim=-1)     # [M, T2, D]

    # Broadcasted batched matmul: [N, 1, T1, D] @ [1, M, D, T2] -> [N, M, T1, T2]
    return torch.matmul(queries.unsqueeze(1), keys.transpose(1, 2).unsqueeze(0))

def calculate_similarity_local_timelevel_retrieval(z1, z2, topk=5):
    """
    Retrieve the top-k time-step pairs for each query.

    1) Compute the full local similarity => [N, M, T1, T2].
    2) Flatten the gallery/time-step dimensions => shape [N, M*T1*T2].
    3) Retrieve the top-k entries for each query n.

    Args:
        z1 (torch.Tensor): shape [N, T1, D] for queries
        z2 (torch.Tensor): shape [M, T2, D] for keys
        topk (int): number of top time-step pairs to retrieve for each query

    Returns:
        torch.return_types.topk: named tuple with
            values  [N, topk] => highest similarity scores,
            indices [N, topk] => flattened indices in [0..(M*T1*T2 - 1)]
            (decode a flat index with ``unravel_timelevel_index``).
    """
    sim_4d = calculate_similarity_local_full(z1, z2)  # [N, M, T1, T2]

    # Flatten gallery and both time dims so topk ranges over all (m, t1, t2).
    sim_flat = sim_4d.flatten(start_dim=1)  # shape => [N, M*T1*T2]

    # top-K retrieval for each query row n. (Fixed: the result no longer
    # shadows the `topk` parameter, and unused shape unpacking is gone.)
    return torch.topk(sim_flat, k=topk, dim=1, largest=True)

def unravel_timelevel_index(idx, M, T1, T2):
    """
    Map a flattened index in [0..M*T1*T2-1] back to (m, t1, t2).

    Args:
        idx (int): flat index produced over an (M, T1, T2) grid.
        M (int): gallery size (kept for interface symmetry; not needed by the math).
        T1 (int): number of query time steps.
        T2 (int): number of key time steps.

    Returns:
        tuple: (m, t1, t2) coordinates.
    """
    m_, remainder = divmod(idx, T1 * T2)
    t1_, t2_ = divmod(remainder, T2)
    return (m_, t1_, t2_)

def calculate_similarity_local_aggregate(z1, z2, reduce_op='mean'):
    """
    Compute local similarity by averaging (or maxing) over all [T1 x T2]
    time-step pairs.

    Args:
        z1 (torch.Tensor): [N, T1, D]
        z2 (torch.Tensor): [M, T2, D]
        reduce_op (str): 'mean' or 'max' to reduce the 2D time-step matrix.

    Returns:
        torch.Tensor: [N, M] similarity matrix.

    Raises:
        ValueError: if reduce_op is neither 'mean' nor 'max'.
    """
    sim_4d = calculate_similarity_local_full(z1, z2)  # [N, M, T1, T2]

    # Collapse both time-step axes in one reduction => [N, M].
    if reduce_op == 'mean':
        return sim_4d.mean(dim=(-2, -1))
    if reduce_op == 'max':
        return sim_4d.amax(dim=(-2, -1))
    raise ValueError(f"Unknown reduce_op '{reduce_op}', use 'mean' or 'max'.")

def calculate_similarity_local_diag(z1, z2, method='mean'):
    """
    Compute local similarity from the diagonal of the [T, T] time-step
    similarity matrix, reduced by mean or max along that diagonal.

    Args:
        z1 (torch.Tensor): Local embeddings for set 1, shape [N, T, D].
        z2 (torch.Tensor): Local embeddings for set 2, shape [M, T, D].
                           (Often N == M when comparing a set to itself,
                            but they may differ.)
        method (str): How to reduce the diagonal — 'mean' or 'max'.

    Returns:
        torch.Tensor: [N, M] similarity matrix where each entry is the
                      (mean or max) diagonal similarity between z1[n] and z2[m].

    Raises:
        ValueError: if method is neither 'mean' nor 'max'.
    """
    # Validate up front so we never compute the similarity for a bad method.
    if method not in ('mean', 'max'):
        raise ValueError(f"Unsupported method='{method}'. Use 'mean' or 'max'.")

    sim_4d = calculate_similarity_local_full(z1, z2)       # [N, M, T1, T2]
    sim_diag = sim_4d.diagonal(dim1=-2, dim2=-1)           # [N, M, T]

    return sim_diag.mean(dim=-1) if method == 'mean' else sim_diag.amax(dim=-1)

def calculate_similarity_local_gaussian(z1, z2, std=0.1):
    """
    Compute local similarity using a Gaussian weighting around the diagonal.

    Time steps are mapped onto [0, 1]; each (t1, t2) pair is weighted by a
    row-normalised Gaussian of the time distance, so matches far from the
    diagonal contribute less.

    Args:
        z1 (torch.Tensor): [N, T, D]
        z2 (torch.Tensor): [M, T, D] — assumes the same T as z1, since one
            [T, T] weight matrix is shared. TODO confirm callers guarantee this.
        std (float): standard deviation of the Gaussian around the diagonal.

    Returns:
        torch.Tensor: [N, M] similarity matrix; each entry is the weighted
            sum over T x T time-step similarities, averaged over t1.
    """
    # Only T is needed; the previously unpacked N, D and M were unused.
    T = z1.shape[1]

    # Row-normalised Gaussian kernel over normalised time distance: each t1
    # distributes a total weight of 1 across all t2.
    Ts = torch.linspace(0., 1., T, device=z1.device)
    dist = Ts[:, None] - Ts[None, :]
    prob_scores = torch.exp(-0.5 * (dist / std) ** 2)
    pos_prob = prob_scores / prob_scores.sum(dim=-1, keepdim=True)  # [T, T]

    sim_4d = calculate_similarity_local_full(z1, z2)  # [N, M, T, T]

    # For each (n,m) => sum_{t1, t2} [sim_4d[n,m,t1,t2] * pos_prob[t1,t2]] / T,
    # i.e. the mean over t1 of the per-row weighted similarities.
    sim_weighted = (sim_4d * pos_prob).sum(dim=(-2, -1)) / T

    return sim_weighted

def calculate_similarity_combined(
    z1_global, z2_global,
    z1_local, z2_local,
    alpha=0.5,
    local_type='diag_max'
):
    """
    Compute a combined similarity: alpha * global_sim + (1 - alpha) * local_sim,
    with both parts z-score normalised first so they mix on a comparable scale.

    Args:
        z1_global (torch.Tensor): shape [N, D].
        z2_global (torch.Tensor): shape [M, D].
        z1_local (torch.Tensor): shape [N, T, D].
        z2_local (torch.Tensor): shape [M, T, D].
        alpha (float): weight for global; (1 - alpha) for local.
        local_type (str): one of 'diag_mean', 'diag_max', 'gaussian',
            'aggregate_timesteps_mean', 'aggregate_timesteps_max'.

    Returns:
        torch.Tensor: [N, M] combined similarity matrix.

    Raises:
        ValueError: if local_type is not supported.
    """
    # Dispatch table keeps the supported options and the error message in sync
    # (the previous message omitted the 'aggregate_timesteps_*' options).
    local_sim_fns = {
        'diag_mean': lambda a, b: calculate_similarity_local_diag(a, b, method='mean'),
        'diag_max': lambda a, b: calculate_similarity_local_diag(a, b, method='max'),
        'gaussian': lambda a, b: calculate_similarity_local_gaussian(a, b, std=0.1),
        'aggregate_timesteps_mean': lambda a, b: calculate_similarity_local_aggregate(a, b, reduce_op='mean'),
        'aggregate_timesteps_max': lambda a, b: calculate_similarity_local_aggregate(a, b, reduce_op='max'),
    }
    if local_type not in local_sim_fns:
        raise ValueError(
            f"Unsupported local_type='{local_type}'. Use one of {sorted(local_sim_fns)}."
        )

    sim_global = calculate_similarity_global(z1_global, z2_global)   # [N, M]
    sim_local = local_sim_fns[local_type](z1_local, z2_local)        # [N, M]

    def zscore_scale(sim):
        # Standardise so global and local similarities carry comparable weight.
        mean_val = sim.mean()
        std_val = sim.std()
        return (sim - mean_val) / (std_val + 1e-11)

    sim_combined = alpha * zscore_scale(sim_global) + (1 - alpha) * zscore_scale(sim_local)

    return sim_combined


def evaluate_query_categories(
    query_labels,
    retrieved_labels_list,
    category_indices=None,
    mode="per_category",
    min_categories_threshold=1
):
    """
    Evaluate how well 'retrieved_labels_list' matches 'query_labels' for certain category columns.

    Args:
        query_labels (np.ndarray or list): shape [NUM_LABELS].
            The categories/labels for the query sample.
        retrieved_labels_list (list of np.ndarray or list):
            Each element is shape [NUM_LABELS], for one retrieved neighbor.
            May be empty, in which case all fractions are 0.
        category_indices (list of int): Which indices in query_labels correspond
            to category columns. If None, we use all columns.
        mode (str):
            - "per_category":
                Average fraction of matched categories across neighbors.
                E.g., a neighbor matching 2 of 4 categories contributes 0.5;
                contributions are summed over neighbors, then divided by k.
            - "strict_all":
                A neighbor counts (1) only if it matches all categories;
                the result is the fraction of such neighbors.
            - "min_threshold":
                A neighbor counts if it matches at least
                'min_categories_threshold' categories.
        min_categories_threshold (int): used only if mode="min_threshold".

    Returns:
        dict with the following keys:
            - "overall_fraction": float
                A single fraction of neighbors meeting the mode's criterion
                (or the average per-category fraction for mode="per_category").
            - "per_category_fractions": np.ndarray of shape [num_categories]
                For each category, the fraction of neighbors matching it.
            - "neighbors_info": list of dicts, one per neighbor, each containing
                "num_matches", "matches_array" (boolean per category) and
                "pass_criterion" (whether the neighbor passed the mode criterion;
                in "per_category" mode it is True for any non-zero fraction).

    Raises:
        ValueError: if mode is not one of the supported values.
    """

    if category_indices is None:
        category_indices = range(len(query_labels))
    category_indices = list(category_indices)
    num_categories = len(category_indices)

    per_cat_match_count = np.zeros(num_categories, dtype=int)

    # Guard against an empty neighbor list (mirrors
    # evaluate_query_phenotypes_absolute); previously this divided by zero.
    k = len(retrieved_labels_list)
    if k == 0:
        return {
            "overall_fraction": 0.0,
            "per_category_fractions": per_cat_match_count,
            "neighbors_info": []
        }

    # The query's category sub-vector is loop-invariant: compute it once.
    q_sub = np.array(query_labels, dtype=float)[category_indices]

    score_accumulator = 0.0
    neighbors_info = []

    for neighbor_idx, retrieved_labels in enumerate(retrieved_labels_list):
        r_sub = np.array(retrieved_labels, dtype=float)[category_indices]

        matches = (q_sub == r_sub)
        num_matches = int(np.sum(matches))
        # Vectorised per-category tally (boolean array adds as 0/1).
        per_cat_match_count += matches

        if mode == "per_category":
            frac = num_matches / num_categories
            score_accumulator += frac
            pass_criterion = frac > 0.0

        elif mode == "strict_all":
            pass_criterion = (num_matches == num_categories)
            if pass_criterion:
                score_accumulator += 1.0

        elif mode == "min_threshold":
            pass_criterion = (num_matches >= min_categories_threshold)
            if pass_criterion:
                score_accumulator += 1.0
        else:
            raise ValueError(f"Unknown mode: {mode}.")

        neighbors_info.append({
            "neighbor_index": neighbor_idx,
            "num_matches": num_matches,
            "matches_array": matches,
            "pass_criterion": pass_criterion
        })

    overall_fraction = score_accumulator / k
    per_category_fractions = per_cat_match_count / k

    return {
        "overall_fraction": overall_fraction, # Average fraction of matched labels among the top-K neighbors.
        "per_category_fractions": per_category_fractions,  # shape [num_categories] Precision@K for category c
        "neighbors_info": neighbors_info
    }

def evaluate_query_phenotypes_absolute(
    query_labels,
    retrieved_labels_list,
    phenotype_indices=None,
    margin_type="percentage",   # "percentage","logarithm","zscore","absolute"
    margin_value=0.1,          # used differently depending on margin_type
    mode="per_category",
    min_categories_threshold=1,
    zscore_stds=None
):
    """
    Evaluate how well 'retrieved_labels_list' matches 'query_labels' for numeric phenotypes,
    using one of:
      - "percentage" => |r - q| <= |q| * margin_value
                        (equivalent to the symmetric +/- margin_value% interval;
                        using |q| also makes negative query values work — the
                        previous lower/upper formulation produced an empty
                        interval for q < 0)
      - "logarithm"  => compare in log space
      - "zscore"     => compare in #std devs, using zscore_stds
      - "absolute"   => +/- margin_value in raw units

    Args:
        query_labels (np.ndarray or list): shape [NUM_LABELS], numeric phenotypes for query.
        retrieved_labels_list (list of np.ndarray or list): top-K neighbors, each shape [NUM_LABELS].
            May be empty, in which case all fractions are 0.
        phenotype_indices (list): which columns to evaluate as numeric phenotypes. If None => use all.
        margin_type (str): one of {"percentage","logarithm","zscore","absolute"}.
        margin_value (float):
            - if "percentage": => +/- margin_value * |q_val|
            - if "logarithm": => max allowed |log(r) - log(q)|
            - if "zscore": => # std devs
            - if "absolute": => +/- margin_value in raw units
        mode (str): "per_category","strict_all","min_threshold"
        min_categories_threshold (int): for "min_threshold" mode
        zscore_stds (list or np.ndarray): if margin_type="zscore", pass an array of std dev values.
                                          must match phenotype_indices in length.

    Returns:
        dict => {
          "overall_fraction": float,
          "per_category_fractions": np.ndarray,
          "neighbors_info": list
        }

    Raises:
        ValueError: for an unknown margin_type or mode, or when zscore_stds is
            missing while margin_type="zscore".
    """
    if phenotype_indices is None:
        phenotype_indices = range(len(query_labels))
    phenotype_indices = list(phenotype_indices)
    num_phenotypes = len(phenotype_indices)

    if margin_type == "zscore" and (zscore_stds is None):
        raise ValueError("Must provide zscore_stds when margin_type='zscore'.")

    query_arr = np.array(query_labels, dtype=float)

    per_phenotype_match_count = np.zeros(num_phenotypes, dtype=int)
    score_accumulator = 0.0
    neighbors_info = []

    k = len(retrieved_labels_list)
    if k == 0:
        return {
            "overall_fraction": 0.0,
            "per_category_fractions": per_phenotype_match_count,
            "neighbors_info": []
        }

    def _within_margin(q_val, r_val, i_p):
        # Decide whether r_val is "close enough" to q_val under margin_type.
        if margin_type == "percentage":
            # BUGFIX: |q_val| keeps the tolerance non-negative, so a negative
            # query value can still match (identical values always match).
            return abs(r_val - q_val) <= abs(q_val) * margin_value
        if margin_type == "absolute":
            return abs(r_val - q_val) <= margin_value
        if margin_type == "logarithm":
            # Log comparison is undefined for non-positive values.
            if q_val <= 0 or r_val <= 0:
                return False
            return abs(np.log(r_val) - np.log(q_val)) <= margin_value
        if margin_type == "zscore":
            return abs(r_val - q_val) / zscore_stds[i_p] <= margin_value
        raise ValueError(f"Unknown margin_type='{margin_type}'.")

    for neighbor_idx, neighbor_labels in enumerate(retrieved_labels_list):
        neighbor_arr = np.array(neighbor_labels, dtype=float)

        matches_array = np.array(
            [_within_margin(query_arr[p_idx], neighbor_arr[p_idx], i_p)
             for i_p, p_idx in enumerate(phenotype_indices)],
            dtype=bool
        )

        num_matches = int(np.sum(matches_array))
        # Vectorised per-phenotype tally (boolean array adds as 0/1).
        per_phenotype_match_count += matches_array

        # Evaluate neighbor for 'mode'
        if mode == "per_category":
            frac = num_matches / num_phenotypes
            score_accumulator += frac
            pass_criterion = frac > 0.0
        elif mode == "strict_all":
            pass_criterion = (num_matches == num_phenotypes)
            if pass_criterion:
                score_accumulator += 1.0
        elif mode == "min_threshold":
            pass_criterion = (num_matches >= min_categories_threshold)
            if pass_criterion:
                score_accumulator += 1.0
        else:
            raise ValueError(f"Unknown mode: {mode}.")

        neighbors_info.append({
            "neighbor_index": neighbor_idx,
            "num_matches": num_matches,
            "matches_array": matches_array,
            "pass_criterion": pass_criterion
        })

    overall_fraction = score_accumulator / k
    per_phenotype_fractions = per_phenotype_match_count / k

    return {
        "overall_fraction": overall_fraction,
        "per_category_fractions": per_phenotype_fractions,
        "neighbors_info": neighbors_info
    }
    
def compute_full_ranks_metrics_per_pheno(
    query_labels, 
    full_ranks_indices, 
    gallery_labels, 
    phenotype_indices,
    margin_value=0.5,
    zscore_stds=None
):
    """
    Compute full-gallery Mean Rank and Median Rank for each phenotype.

    For each query, for each phenotype (given by phenotype_indices), we scan the entire
    ranking (provided by full_ranks_indices) and record the 1-indexed rank at which a
    candidate first matches the query using z-score matching:
         match if abs(r_val - q_val) / std <= margin_value.

    If no candidate matches for a given phenotype, assign a worst-case rank of
    (N_candidates + 1).

    The candidate scan is vectorised with numpy (the previous version re-built a
    numpy array per candidate inside a Python loop, i.e. accidental
    O(queries * phenotypes * candidates) array construction).

    Args:
        query_labels (array-like): Array of shape [num_queries, num_labels]
            (e.g., aligned_labels_matrix). Assumed rectangular.
        full_ranks_indices (np.ndarray): For each query, a full sorted array of
            candidate indices (full ranking).
        gallery_labels (array-like): Array of shape [num_candidates, num_labels].
            Assumed rectangular.
        phenotype_indices (list): List of label indices for the phenotypes to evaluate.
        margin_value (float): z-score margin threshold.
        zscore_stds (list or np.ndarray): Standard deviations, indexed by label index.

    Returns:
        per_pheno_mean_ranks (np.ndarray): Array of mean ranks per phenotype.
        per_pheno_median_ranks (np.ndarray): Array of median ranks per phenotype.
        per_pheno_ranks (list of lists): List (one per phenotype) of all ranks (one per query).
    """
    # Convert once up front instead of per-candidate inside the inner loop.
    query_arr = np.asarray(query_labels, dtype=float)       # [num_queries, num_labels]
    gallery_arr = np.asarray(gallery_labels, dtype=float)   # [num_candidates, num_labels]
    stds = np.asarray(zscore_stds, dtype=float)

    num_queries = query_arr.shape[0]
    N_candidates = gallery_arr.shape[0]

    # per_pheno_ranks[ph] holds one rank value per query for phenotype 'ph'.
    per_pheno_ranks = [[] for _ in range(len(phenotype_indices))]

    for i in range(num_queries):
        ranking = np.asarray(full_ranks_indices[i])
        # Gallery rows reordered by this query's ranking: [N_candidates, num_labels].
        ranked_vals = gallery_arr[ranking]

        for idx, ph in enumerate(phenotype_indices):
            # z-score distance of every ranked candidate to the query, in ranking order.
            z = np.abs(ranked_vals[:, ph] - query_arr[i, ph]) / stds[ph]
            hits = np.flatnonzero(z <= margin_value)
            # First hit position (0-based) + 1 => 1-indexed rank; worst-case if none.
            rank_found = int(hits[0]) + 1 if hits.size else N_candidates + 1
            per_pheno_ranks[idx].append(rank_found)

    # Compute per-phenotype mean and median rank over queries.
    per_pheno_mean_ranks = np.array([np.mean(ranks) for ranks in per_pheno_ranks])
    per_pheno_median_ranks = np.array([np.median(ranks) for ranks in per_pheno_ranks])

    return per_pheno_mean_ranks, per_pheno_median_ranks, per_pheno_ranks

    
def _compute_similarity_matrix(
    approach,
    local_type,
    alpha,
    ecg_embeddings_global,
    mri_embeddings_global,
    ecg_embeddings_local,
    mri_embeddings_local,
):
    """Select and compute the [N, N] cross-modal similarity matrix.

    Returns None for approach=='local' with local_type=='timelevel_topk',
    where retrieval is done directly on time-level scores and no dense
    patient-by-patient matrix exists.

    Raises:
        ValueError: on an unknown `approach` or unsupported `local_type`.
    """
    if approach == "global":
        return calculate_similarity_global(ecg_embeddings_global, mri_embeddings_global)
    if approach == "local":
        if local_type == "timelevel_topk":
            return None
        if local_type == 'diag_mean':
            return calculate_similarity_local_diag(ecg_embeddings_local, mri_embeddings_local, method='mean')
        if local_type == 'diag_max':
            return calculate_similarity_local_diag(ecg_embeddings_local, mri_embeddings_local, method='max')
        if local_type == 'gaussian':
            return calculate_similarity_local_gaussian(ecg_embeddings_local, mri_embeddings_local, std=0.1)
        if local_type == 'aggregate_timesteps_mean':
            return calculate_similarity_local_aggregate(ecg_embeddings_local, mri_embeddings_local, reduce_op='mean')
        if local_type == 'aggregate_timesteps_max':
            return calculate_similarity_local_aggregate(ecg_embeddings_local, mri_embeddings_local, reduce_op='max')
        raise ValueError(f"Unsupported local_type='{local_type}'.")
    if approach == "combined":
        return calculate_similarity_combined(
            z1_global=ecg_embeddings_global,
            z2_global=mri_embeddings_global,
            z1_local=ecg_embeddings_local,
            z2_local=mri_embeddings_local,
            alpha=alpha,
            local_type=local_type
        )
    raise ValueError(f"Unknown approach='{approach}'. Choose 'global','local','combined'.")


def retrieve_and_evaluate(
    # Embeddings for global approach
    ecg_embeddings_global=None,    # [N, D]
    mri_embeddings_global=None,    # [N, D]
    # Embeddings for local approach
    ecg_embeddings_local=None,     # [N, T_ecg, D]
    mri_embeddings_local=None,     # [N, T_mri, D]
    
    labels_table=None,
    dataloader_eids=None,
    approach="global",         # "global", "local", or "combined"
    local_type="diag_mean",    # if approach=="local" or "combined"
    alpha=0.5,                 # if approach=="combined"
    
    k=5,
    
    # Evaluation method:
    evaluation_method="category",  # "category" or "absolute"
    mode="per_category",           # "per_category","strict_all","min_threshold"
    min_categories_threshold=1,
    
    margin_type="zscore",          # or "logarithm","percentage","absolute", "zscore"
    margin_value=1.0,              # e.g. ±1 std          # if evaluation_method=="absolute"
    # Additional parameter:
    category_cols=None             # None => use columns ending with "_cat", 
                                   # or a list of column names, e.g. ["I20","I21","I22"]
):
    """
    A unified retrieval + evaluation function:
      1) Chooses how to compute similarity: global/local/combined
      2) Retrieves top-K neighbors
      3) Evaluates them either via category-based or absolute-value-based approach.

    Args:
        ecg_embeddings_global (torch.Tensor): shape [N, D] for global ECG
        mri_embeddings_global (torch.Tensor): shape [N, D] for global MRI
        ecg_embeddings_local (torch.Tensor):  shape [N, T_ecg, D] for local ECG
        mri_embeddings_local (torch.Tensor):  shape [N, T_mri, D] for local MRI
        labels_table (pd.DataFrame): must contain 'eid', plus columns for categories or numeric phenotypes
        dataloader_eids (list): the eids in the same order as embeddings
        approach (str): "global","local","combined" => which similarity function to use
        local_type (str): e.g. 'diag_mean','diag_max','gaussian','aggregate_timesteps_mean','aggregate_timesteps_max'
        alpha (float): weight for global if approach=="combined"
        
        k (int): top-K neighbors
        evaluation_method (str): "category" => evaluate_query_categories, 
                                 "absolute" => evaluate_query_phenotypes_absolute
        mode (str): "per_category","strict_all","min_threshold"
        min_categories_threshold (int): for "min_threshold" mode
        margin_type (str): "percentage" or "absolute", if evaluation_method="absolute"
        margin_value (float): margin size for absolute-based checking

    Returns:
        all_query_results (list of dict): retrieval + evaluation details per query
        (final_score, final_score_std) (tuple of float): mean and std of
            "overall_fraction" across queries
        (mean_per_category, std_per_category) (tuple of np.ndarray): mean and
            std of the per-category (or per-phenotype) fractions across queries
    """
    # Fail fast on an invalid evaluation method instead of inside the query loop.
    if evaluation_method not in ("category", "absolute"):
        raise ValueError(f"Unknown evaluation_method='{evaluation_method}'. Choose 'category' or 'absolute'.")

    # Align labels to the embedding order; drop 'eid' so that column position i
    # in `all_columns` maps directly to column i of `aligned_labels_matrix`.
    labels_table = labels_table.set_index("eid").loc[dataloader_eids].reset_index()
    features_df = labels_table.drop(columns=["eid"]).copy()  # drop eid here
    aligned_labels_matrix = features_df.values  # shape [N, NUM_LABELS]
    all_columns = features_df.columns.tolist()  # no 'eid' => direct alignment

    # None only for the 'timelevel_topk' local variant (retrieved separately below).
    similarity_matrix = _compute_similarity_matrix(
        approach=approach,
        local_type=local_type,
        alpha=alpha,
        ecg_embeddings_global=ecg_embeddings_global,
        mri_embeddings_global=mri_embeddings_global,
        ecg_embeddings_local=ecg_embeddings_local,
        mri_embeddings_local=mri_embeddings_local,
    )

    if approach == "local" and local_type == "timelevel_topk":
        topk = calculate_similarity_local_timelevel_retrieval(ecg_embeddings_local, mri_embeddings_local, topk=k)
    else:
        topk = torch.topk(similarity_matrix, k=k, dim=1, largest=True)
    topk_indices = topk.indices  # [N, k]
    topk_values = topk.values    # [N, k]

    # Resolve the category columns to evaluate (only used for "category").
    if evaluation_method == "category":
        if category_cols is None:
            cat_cols_indices = [i for i, col in enumerate(all_columns) if col.endswith("_cat")]
        else:
            cat_cols_indices = []
            for c in category_cols:
                if c in all_columns:
                    cat_cols_indices.append(all_columns.index(c))
                else:
                    print(f"Warning: '{c}' not found in the table columns.")
    else:
        cat_cols_indices = []

    # Numeric phenotype columns: everything that is not a '*_cat' column or 'sex'.
    # (The original `col not in all_columns` clause was vacuously false and removed.)
    numeric_indices = [i for i, col in enumerate(all_columns)
                       if not col.endswith("_cat") and col != "sex"]

    # Per-phenotype stds from the whole dataset, only when the per-query
    # absolute evaluation needs a z-score margin. Kept as a compact list
    # (one entry per numeric_indices entry) to preserve the contract of
    # evaluate_query_phenotypes_absolute.
    zscore_stds = None
    if evaluation_method == "absolute" and margin_type == "zscore":
        zscore_stds = [labels_table[all_columns[i]].std() for i in numeric_indices]

    all_query_results = []
    N = len(dataloader_eids)

    for i in range(N):
        query_labels = aligned_labels_matrix[i]  # shape [NUM_LABELS]
        query_similarities_topk = topk_values[i]
        retrieved_indices = topk_indices[i]

        if approach == "local" and local_type == "timelevel_topk":
            # Time-level retrieval returns flat (patient, t_ecg, t_mri) indices;
            # map each back to its patient index to fetch labels/eids.
            retrieved_labels = []
            retrieved_eids = []
            for flat_idx in retrieved_indices:
                m_, _, _ = unravel_timelevel_index(
                    flat_idx.item(),
                    mri_embeddings_local.size(0),  # Number of patients
                    ecg_embeddings_local.size(1),  # ECG time steps
                    mri_embeddings_local.size(1)   # MRI time steps
                )
                retrieved_labels.append(aligned_labels_matrix[m_])
                retrieved_eids.append(dataloader_eids[m_])
        else:
            retrieved_labels = [aligned_labels_matrix[idx] for idx in retrieved_indices]
            retrieved_eids = [dataloader_eids[idx] for idx in retrieved_indices]

        if evaluation_method == "category":
            score_dict = evaluate_query_categories(
                query_labels=query_labels,
                retrieved_labels_list=retrieved_labels,
                category_indices=cat_cols_indices,
                mode=mode,
                min_categories_threshold=min_categories_threshold
            )
        else:  # "absolute" (validated above)
            score_dict = evaluate_query_phenotypes_absolute(
                query_labels=query_labels,
                retrieved_labels_list=retrieved_labels,
                phenotype_indices=numeric_indices,
                margin_type=margin_type,
                margin_value=margin_value,
                mode=mode,
                min_categories_threshold=min_categories_threshold,
                zscore_stds=zscore_stds
            )

        result = {
            "query_eid": dataloader_eids[i],
            "query_labels": query_labels,
            "retrieved_eids": retrieved_eids,
            "retrieved_labels": retrieved_labels,
            "similarity_scores": query_similarities_topk.tolist(),
            "overall_fraction": score_dict["overall_fraction"],
            "per_category_fractions": score_dict["per_category_fractions"].tolist(),
            "neighbors_info": score_dict["neighbors_info"]
        }
        all_query_results.append(result)

    # Compute the mean and std for the overall retrieval score
    overall_fractions = [r["overall_fraction"] for r in all_query_results]
    final_score = float(np.mean(overall_fractions))
    final_score_std = float(np.std(overall_fractions))
    print(f"Average retrieval score over all queries: {final_score:.4f} ± {final_score_std:.4f}")

    # Compute the mean and std for the per-category retrieval scores
    per_cat_arrays = np.array([r["per_category_fractions"] for r in all_query_results])
    mean_per_category = per_cat_arrays.mean(axis=0)
    std_per_category = per_cat_arrays.std(axis=0)
    print(f"Average retrieval score per category/phenotype: {mean_per_category}")
    print(f"Standard deviation per category/phenotype: {std_per_category}")

    # ---- Compute Full Gallery Ranking Metrics (MdR and MnR) ----
    if similarity_matrix is None:
        # 'timelevel_topk' retrieval has no dense similarity matrix, so the
        # full-gallery ranking is undefined (the original code crashed here
        # on torch.argsort(None)).
        print("Skipping full-gallery rank metrics: no dense similarity matrix "
              "for local_type='timelevel_topk'.")
    else:
        # full_ranks_indices: ranking of all candidates for each query.
        full_ranks_indices = torch.argsort(similarity_matrix, dim=1, descending=True)
        if hasattr(full_ranks_indices, "cpu"):
            full_ranks_indices_np = full_ranks_indices.cpu().numpy()
        else:
            full_ranks_indices_np = np.array(full_ranks_indices)

        # compute_full_ranks_metrics_per_pheno indexes its zscore_stds argument
        # by *column position* (zscore_stds[ph] with ph taken from
        # phenotype_indices). Build a full-length, column-indexed array so that
        # lookup is correct, and compute it unconditionally: the original
        # passed either None (crash for evaluation_method='category') or a
        # compact list (mis-indexed).
        rank_stds = np.full(len(all_columns), np.nan)
        for i in numeric_indices:
            rank_stds[i] = labels_table[all_columns[i]].std()

        per_pheno_mean_ranks, per_pheno_median_ranks, per_pheno_ranks = compute_full_ranks_metrics_per_pheno(
            query_labels=aligned_labels_matrix,    # shape [num_queries, num_labels]
            full_ranks_indices=full_ranks_indices_np,
            gallery_labels=aligned_labels_matrix,    # full gallery labels
            phenotype_indices=numeric_indices,       # list of phenotype indices
            margin_value=margin_value,
            zscore_stds=rank_stds
        )
        print("Per-Phenotype Mean Ranks:", per_pheno_mean_ranks)
        print("Per-Phenotype Median Ranks:", per_pheno_median_ranks)

    return all_query_results, (final_score, final_score_std), (mean_per_category, std_per_category)


def compute_embeddings(data_loader, components, model_type, cfg, device="cpu"):
    """
    Run the ECG and MRI encoders over a dataloader and collect embeddings.

    Args:
        data_loader: iterable yielding dicts with keys 'ecg_aug', 'image_aug',
            'patient_id' and 'rpeaks'.
        components (dict): named modules — must include 'ecg_model',
            'mri_model', 'projection_ecg_global', 'projection_mri_global',
            'layernorm_ecg_global', 'layernorm_mri_global'; the local
            projections are required only for model_type in {'local', 'both'}.
        model_type (str): 'global', 'local' or 'both' — which branches to run.
        cfg: config providing cfg.training_mode.reduction.ecg settings.
        device (str): torch device for the forward passes.

    Returns:
        dict: concatenated embedding tensors plus 'eids'. Entries whose branch
            did not run are None (the original code crashed on torch.cat([])
            when model_type == 'local').
    """
    mri_embeddings_global_from_encoder = []
    ecg_embeddings_global_from_encoder = []

    ecg_embeddings_global = []
    mri_embeddings_global = []

    ecg_embeddings_local = []
    mri_embeddings_local = []

    mri_embeddings_non_projected_local = []
    ecg_embeddings_non_projected_local = []

    eids = []

    ecg_model = components["ecg_model"].to(device)
    mri_model = components["mri_model"].to(device)

    projection_ecg_global = components["projection_ecg_global"].to(device)
    projection_mri_global = components["projection_mri_global"].to(device)

    if model_type in ['local', 'both']:
        projection_ecg_local = components["projection_ecg_local"].to(device)
        projection_mri_local = components["projection_mri_local"].to(device)

    layernorm_ecg_global = components["layernorm_ecg_global"].to(device)
    layernorm_mri_global = components["layernorm_mri_global"].to(device)

    # Maps ECG tokens to one representation per MRI temporal slot so the two
    # modalities can be aligned time-wise.
    heartbeat_representation = ECGHeartBeatRepresentation(
        target_length=mri_model.config.num_frames // mri_model.config.tubelet_size,
        time_patch_size=ecg_model.config.patch_size[1],
        channel_patch_size=ecg_model.config.patch_size[0],
        crop_method=cfg.training_mode.reduction.ecg.crop_method,
        n_mri_frames=mri_model.config.num_frames,
        channel_reduction=cfg.training_mode.reduction.ecg.all_tokens
    )

    def _reshape_mri_tokens(tokens):
        # Group patch tokens by temporal slot [B, T, P, H] and mean over
        # spatial patches, giving one embedding per MRI time step.
        n_patches = (mri_model.config.image_size // mri_model.config.patch_size) ** 2
        return torch.reshape(
            tokens,
            (tokens.shape[0],
             mri_model.config.num_frames // mri_model.config.tubelet_size,
             n_patches,
             mri_model.config.hidden_size)
        ).mean(dim=2)

    with torch.no_grad():
        for sample in tqdm(data_loader, desc="Computing Embeddings"):
            ecg = sample["ecg_aug"].to(device)
            mri = sample["image_aug"].to(device)
            patient_id = sample["patient_id"]
            rpeaks = sample['rpeaks'].to(device)

            ecg_output = ecg_model(pixel_values=ecg, apply_masking=False, use_layernorm=False).last_hidden_state
            mri_output = mri_model(pixel_values=mri, apply_masking=False, use_layernorm=False).last_hidden_state

            # Global embeddings: mean-pool all tokens except the CLS token,
            # then LayerNorm + projection.
            if model_type in ['global', 'both']:
                ecg_cls = torch.mean(ecg_output[:, 1:], dim=1)
                ecg_cls_normed = layernorm_ecg_global(ecg_cls)
                ecg_proj = projection_ecg_global(ecg_cls_normed)

                ecg_embeddings_global.append(ecg_proj.cpu().detach())
                ecg_embeddings_global_from_encoder.append(ecg_cls.cpu().detach())

                mri_cls = torch.mean(mri_output[:, 1:], dim=1)
                mri_cls_normed = layernorm_mri_global(mri_cls)
                mri_proj = projection_mri_global(mri_cls_normed)

                mri_embeddings_global.append(mri_proj.cpu().detach())
                mri_embeddings_global_from_encoder.append(mri_cls.cpu().detach())

                # Time-wise alignment, non-projected variants (kept for
                # downstream analysis of raw encoder features).
                ecg_all_non_projected = heartbeat_representation(
                    ecg_embeddings=ecg_output[:, 1:], rpeaks=rpeaks
                )
                ecg_embeddings_non_projected_local.append(ecg_all_non_projected.cpu().detach())

                mri_all_non_projected = _reshape_mri_tokens(mri_output[:, 1:])
                mri_embeddings_non_projected_local.append(mri_all_non_projected.cpu().detach())

            # Local embeddings: per-time-step representations, projected.
            if model_type in ['local', 'both']:
                ecg_all = heartbeat_representation(ecg_embeddings=ecg_output[:, 1:], rpeaks=rpeaks)
                ecg_all_proj = projection_ecg_local(ecg_all)
                ecg_embeddings_local.append(ecg_all_proj.cpu().detach())

                mri_all = _reshape_mri_tokens(mri_output[:, 1:])
                mri_all_proj = projection_mri_local(mri_all)
                mri_embeddings_local.append(mri_all_proj.cpu().detach())

            # NOTE(review): assumes patient_id is a tensor — confirm loader output.
            eids.extend(patient_id.tolist())

    def _cat(batches):
        # Concatenate across batches; None when the branch never ran
        # (torch.cat([]) would raise).
        return torch.cat(batches, dim=0) if batches else None

    return {
        "ecg_embeddings_global": _cat(ecg_embeddings_global),  # LAYERNORM + PROJECTED
        "mri_embeddings_global": _cat(mri_embeddings_global),
        "mri_embeddings_global_from_encoder": _cat(mri_embeddings_global_from_encoder),
        "ecg_embeddings_global_from_encoder": _cat(ecg_embeddings_global_from_encoder),
        # ALL TOKENS RESHAPED and NOT PROJECTED
        "mri_embeddings_local_from_encoder": _cat(mri_embeddings_non_projected_local),
        "ecg_embeddings_local_from_encoder": _cat(ecg_embeddings_non_projected_local),
        "ecg_embeddings_local": _cat(ecg_embeddings_local),  # WILL BE NONE IF GLOBAL ONLY
        "mri_embeddings_local": _cat(mri_embeddings_local),
        "eids": eids
    }


def assign_single_phenotype_categories(
    df: pd.DataFrame,
    phenotype: str,
    n_cats: int = 3,
    sex_based: bool = True,
    lower_percentile: float = 0.1,
    upper_percentile: float = 0.9,
    approach: str = 'quantile',
    cat_col_name: str = None,
    normal_ranges: dict = None
):
    """
    Bucket one numeric phenotype column into ordinal categories (0..n_cats-1,
    e.g. low/medium/high for n_cats=3) and append the result as a new column.

    Args:
        df (pd.DataFrame): Input table; must hold `phenotype` and, when
            sex_based=True, a binary 'sex' column (1=male, 0=female).
        phenotype (str): Numeric column to bucket.
        n_cats (int): Number of buckets (only 3 supported by the quantile path).
        sex_based (bool): Derive thresholds separately per sex.
        lower_percentile (float): Lower cutoff percentile for 'quantile'.
        upper_percentile (float): Upper cutoff percentile for 'quantile'.
        approach (str): One of 'quantile', 'manual', 'prediction_interval'.
        cat_col_name (str): Output column name; defaults to f"{phenotype}_cat".
        normal_ranges (dict): Sex-keyed bounds for 'manual',
            e.g. {"male": [109, 218], "female": [88, 161]}.

    Returns:
        pd.DataFrame: Copy of `df` with the category column added.

    Raises:
        ValueError: on unsupported n_cats/approach, invalid sex value in the
            manual path, or fewer than two samples for a prediction interval.
    """
    result = df.copy()
    target_col = cat_col_name if cat_col_name is not None else f"{phenotype}_cat"

    def bucketize(arr, lo_cut, hi_cut):
        # 0 below lo_cut, 1 in [lo_cut, hi_cut), 2 at/above hi_cut.
        buckets = np.zeros(len(arr), dtype=int)
        buckets[arr >= hi_cut] = 2
        buckets[(arr >= lo_cut) & (arr < hi_cut)] = 1
        return buckets

    # -------- percentile-based thresholds --------
    if approach == 'quantile':
        if n_cats != 3:
            raise ValueError("Current example supports only n_cats=3 for 'quantile' approach. "
                             "Extend logic if you need more categories.")

        if sex_based:
            # Thresholds are fit on non-missing values of each sex, then the
            # full (possibly NaN-containing) slice is bucketed.
            for sex_code, label in ((1, "MALE"), (0, "FEMALE")):
                subset = (result['sex'] == sex_code)
                observed = result.loc[subset, phenotype].dropna()
                lo_cut = observed.quantile(lower_percentile)
                hi_cut = observed.quantile(upper_percentile)
                print(f"{label} {phenotype} {lo_cut:.2f} {hi_cut:.2f}")
                result.loc[subset, target_col] = bucketize(
                    result.loc[subset, phenotype].values, lo_cut, hi_cut
                )
        else:
            # One pair of cutoffs for the whole table.
            observed = result[phenotype].dropna()
            lo_cut = observed.quantile(lower_percentile)
            hi_cut = observed.quantile(upper_percentile)
            result[target_col] = bucketize(result[phenotype].values, lo_cut, hi_cut)

    # -------- hand-specified normal ranges --------
    elif approach == 'manual':

        def classify(value, sex_val=None):
            # Pick the sex-specific normal range, then 0/1/2 around it.
            if sex_val == 1:  # Male
                lower, upper = normal_ranges['male']
            elif sex_val == 0:  # Female
                lower, upper = normal_ranges['female']
            else:
                raise ValueError("Invalid sex value. Expected 0 or 1.")

            if value < lower:
                return 0
            if lower <= value <= upper:
                return 1
            return 2

        result[target_col] = [
            classify(v, s)
            for v, s in zip(result[phenotype], result['sex'])
        ]

    # -------- 95% prediction-interval thresholds --------
    elif approach == 'prediction_interval':

        def interval_95(sample):
            # mean ± t(0.975, n-1) * sqrt((n+1)/n) * sample std.
            n = len(sample)
            if n < 2:
                raise ValueError("Insufficient data to calculate prediction interval.")
            center = np.mean(sample)
            spread = np.std(sample, ddof=1)  # sample standard deviation
            half_width = t.ppf(0.975, df=n - 1) * np.sqrt((n + 1) / n) * spread
            return center - half_width, center + half_width

        if sex_based:
            for sex_code, label in ((1, "MALE"), (0, "FEMALE")):
                subset = (result['sex'] == sex_code)
                observed = result.loc[subset, phenotype].dropna()
                lo_cut, hi_cut = interval_95(observed)
                print(f"{label} {phenotype}: {lo_cut:.2f} to {hi_cut:.2f}")
                result.loc[subset, target_col] = bucketize(
                    result.loc[subset, phenotype].values, lo_cut, hi_cut
                )
        else:
            observed = result[phenotype].dropna()
            lo_cut, hi_cut = interval_95(observed)
            result[target_col] = bucketize(result[phenotype].values, lo_cut, hi_cut)

    else:
        raise ValueError(f"Unknown approach='{approach}'. Use 'quantile', 'manual', or 'prediction_interval'.")

    return result

def assign_categories_for_phenotypes(
    df: pd.DataFrame,
    phenotypes: list,
    n_cats: int = 3,
    sex_based: bool = True,
    lower_percentile: float = 0.1,
    upper_percentile: float = 0.9,
    approach: str = 'quantile',
    normal_ranges_dict: dict = None
):
    """
    Apply assign_single_phenotype_categories to each phenotype in turn,
    accumulating one <phenotype>_cat column per entry.

    Args:
        df (pd.DataFrame): Input table.
        phenotypes (list): Column names to categorize.
        n_cats (int): Number of categories (3 => low/med/high).
        sex_based (bool): Sex-specific thresholds if True.
        lower_percentile (float): Lower cutoff for the 'quantile' approach.
        upper_percentile (float): Upper cutoff for the 'quantile' approach.
        approach (str): 'quantile', 'manual', or 'prediction_interval'.
        normal_ranges_dict (dict): Per-phenotype normal ranges, required for
            every phenotype when approach='manual'.

    Returns:
        pd.DataFrame: Copy of `df` with one <phenotype>_cat column per phenotype.

    Raises:
        ValueError: when approach='manual' and a phenotype has no entry in
            normal_ranges_dict.
    """
    result = df.copy()
    for pheno in phenotypes:
        ranges_for_pheno = None
        if approach == 'manual' and normal_ranges_dict:
            ranges_for_pheno = normal_ranges_dict.get(pheno)
            if ranges_for_pheno is None:
                raise ValueError(f"Missing normal ranges for phenotype '{pheno}' in manual approach.")

        result = assign_single_phenotype_categories(
            df=result,
            phenotype=pheno,
            n_cats=n_cats,
            sex_based=sex_based,
            lower_percentile=lower_percentile,
            upper_percentile=upper_percentile,
            approach=approach,
            cat_col_name=None,  # default f"{pheno}_cat"
            normal_ranges=ranges_for_pheno
        )
    return result
