import torch
import numpy as np
import random
import math
import wandb
import matplotlib.pyplot as plt
import seaborn as sns
import torch.nn.functional as F

from lightning import seed_everything

from torch import nn

from sklearn.cluster import KMeans
from torch.optim import Adam, SGD, AdamW
from torch.optim.lr_scheduler import CosineAnnealingLR, StepLR, ExponentialLR
from torch.optim.lr_scheduler import _LRScheduler
from torch.optim.optimizer import Optimizer, required


def fix_seed(seed: int = 42):
    """Seed every relevant RNG (torch, numpy, random, lightning) for reproducibility.

    When CUDA is present, also seeds all GPUs and forces deterministic cuDNN
    kernels (disabling the autotuner), which may slow training but makes runs
    repeatable.
    """
    for seeder in (torch.manual_seed, np.random.seed, random.seed, seed_everything):
        seeder(seed)

    if not torch.cuda.is_available():
        return
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # covers multi-GPU setups
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

def move_batch_to_device(batch, device):
    """Recursively move a batch of tensors to the given device.

    Generalized over the original: nested containers (lists of dicts,
    dicts of lists, ...) are handled by recursing instead of assuming
    every element is a tensor. Flat batches behave exactly as before.

    Args:
        batch: a Tensor, or an arbitrarily nested list/tuple/dict of them.
        device: target device (e.g. 'cuda', 'cpu', torch.device).

    Returns:
        The same structure with every tensor moved to `device`.
        Note: tuples are returned as lists (preserved from the original
        behavior for backward compatibility).

    Raises:
        TypeError: if a leaf is not a Tensor, list, tuple, or dict.
    """
    if isinstance(batch, torch.Tensor):
        return batch.to(device)
    elif isinstance(batch, (list, tuple)):
        return [move_batch_to_device(item, device) for item in batch]
    elif isinstance(batch, dict):
        return {k: move_batch_to_device(v, device) for k, v in batch.items()}
    else:
        raise TypeError(f"Unsupported batch type: {type(batch)}")
    
def log_metrics(wandb_logger, metrics_dict):
    """Forward a metrics dict to the wandb logger; no-op when no logger is set."""
    if not wandb_logger:
        return
    wandb_logger.log(metrics_dict)


def reorder_kmeans_labels(labels, cluster_centers):
    """
    Re-map cluster labels so that label 0 corresponds to the smallest
    cluster center, label 1 to the next-smallest, and so on.

    Args:
        labels (np.ndarray): shape [N], raw KMeans labels in 0..n_clusters-1.
        cluster_centers (np.ndarray): shape [n_clusters, 1] or [n_clusters],
            the numeric center for each cluster.

    Returns:
        np.ndarray: shape [N], labels renumbered in ascending center order.
    """
    centers = np.ravel(cluster_centers)  # tolerate (n_clusters, 1) input
    order = np.argsort(centers)          # old labels sorted by center value
    # Build the inverse permutation: old_label -> rank of its center.
    old_to_new = np.empty_like(order)
    old_to_new[order] = np.arange(order.size)
    # Vectorized remap of every label via fancy indexing.
    return old_to_new[np.asarray(labels, dtype=int)].astype(int)

def assign_kmeans_categories_with_order(
    df,
    phenotype,
    n_clusters=3,
    sex_based=True,
    cat_col_name=None
):
    """
    Assign categories by applying k-means to a single phenotype distribution,
    optionally splitting by sex so each group has its own k-means.
    The cluster labels are reordered so label 0 => lowest center, etc.

    Args:
        df (pd.DataFrame): input data; must contain the `phenotype` column and,
            when sex_based, a 'sex' column (assumed coding: 1=male, 0=female —
            TODO confirm against the data pipeline).
        phenotype (str): column whose distribution is clustered.
        n_clusters (int): number of k-means clusters.
        sex_based (bool): fit a separate k-means per sex when True.
        cat_col_name (str | None): output column; defaults to
            f"{phenotype}_kmeans_cat".

    Returns:
        pd.DataFrame: copy of `df` with the category column added. Rows that
        could not be clustered (NaN phenotype, or an empty sex group) keep the
        sentinel -1.
    """
    if cat_col_name is None:
        cat_col_name = f"{phenotype}_kmeans_cat"

    df_out = df.copy()
    # Sentinel for rows that cannot be clustered. Initializing up front keeps
    # the column integer-typed and makes both branches consistent: the
    # original only set the -1 default in the pooled branch, so the sex-based
    # path left NaNs (and silently floated the column).
    df_out[cat_col_name] = -1

    def run_kmeans(values, n_clusters):
        X = values.reshape(-1, 1)  # KMeans expects a 2D array
        km = KMeans(n_clusters=n_clusters, random_state=42)
        raw_labels = km.fit_predict(X)        # 0..(n_clusters-1)
        centers = km.cluster_centers_         # shape [n_clusters, 1]
        # Relabel so that 0 => smallest center, 1 => next, ...
        return reorder_kmeans_labels(raw_labels, centers), centers

    if sex_based:
        mask_male = (df_out['sex'] == 1)
        mask_female = (df_out['sex'] == 0)
        # Fit an independent k-means per sex group on non-NaN values.
        for mask in (mask_male, mask_female):
            vals = df_out.loc[mask, phenotype].dropna().values
            if len(vals) > 0:
                group_labels, _ = run_kmeans(vals, n_clusters)
                idx = df_out.loc[mask & df_out[phenotype].notna()].index
                df_out.loc[idx, cat_col_name] = group_labels
    else:
        vals_all = df_out[phenotype].dropna().values
        labels_all, _ = run_kmeans(vals_all, n_clusters)
        idx_all = df_out.loc[df_out[phenotype].notna()].index
        df_out.loc[idx_all, cat_col_name] = labels_all

    return df_out

class WarmupCosineStepScheduler(_LRScheduler):
    """Per-step LR schedule: linear warmup followed by cosine decay.

    Phase 1, steps [0, warmup_steps): LR rises linearly from
        min_lr_factor * base_lr up to base_lr.
    Phase 2, steps [warmup_steps, total_steps]: cosine decay from base_lr
        back down to min_lr_factor * base_lr.
    Phase 3, steps > total_steps: LR held at min_lr_factor * base_lr.

    Args:
        optimizer: wrapped optimizer.
        warmup_steps (int): number of linear warmup steps.
        total_steps (int): step index at which cosine decay finishes.
        min_lr_factor (float): schedule floor as a fraction of the base LR.
        last_step (int): index of the last step taken; -1 starts fresh.
    """

    def __init__(self, optimizer, warmup_steps, total_steps, min_lr_factor=0.1, last_step=-1):
        self.warmup_steps = warmup_steps
        self.total_steps = total_steps
        self.min_lr_factor = min_lr_factor
        # _LRScheduler tracks progress in `last_epoch`; used here as a step counter.
        super().__init__(optimizer, last_epoch=last_step)

    def get_lr(self):
        if self.last_epoch < self.warmup_steps:
            # Linear warmup from min_lr_factor * base_lr to base_lr.
            lr_scale = self.min_lr_factor + (1 - self.min_lr_factor) * (self.last_epoch / self.warmup_steps)
            return [base_lr * lr_scale for base_lr in self.base_lrs]
        elif self.last_epoch <= self.total_steps:
            # Cosine decay after warmup.
            cos_step = self.last_epoch - self.warmup_steps
            cos_steps = self.total_steps - self.warmup_steps
            # Guard the degenerate zero-length decay phase
            # (total_steps == warmup_steps), which previously raised
            # ZeroDivisionError; jump straight to the floor instead.
            cos_theta = cos_step / cos_steps if cos_steps > 0 else 1.0
            cos_theta = min(max(cos_theta, 0.0), 1.0)
            scale = self.min_lr_factor + (1 - self.min_lr_factor) * 0.5 * (1 + math.cos(math.pi * cos_theta))
            return [base_lr * scale for base_lr in self.base_lrs]
        else:
            # After total_steps, hold the minimum learning rate.
            return [base_lr * self.min_lr_factor for base_lr in self.base_lrs]


def define_param_groups(models, weight_decay):
    """Split trainable parameters into weight-decay / no-weight-decay groups.

    Parameters whose full name contains one of the exempt tokens (biases,
    norms, embeddings, tokens, heads, ...) or that belong to a normalization
    module receive zero weight decay; everything else gets `weight_decay`.

    Args:
        models: a single nn.Module or a list of them.
        weight_decay (float): decay applied to the regular group.

    Returns:
        list[dict]: two optimizer param-group dicts, named 'wd' and 'no wd'.
    """
    no_wd_tokens = (
        'bias', 'layernorm', 'position_embeddings', 'mask_token', 'cls_token',
        'log_temperature', 'projection_head', 'linear_probe_layer',
        'linear_layer_reduction', 'attention_pooling', 'layerscale', 'bn',
    )
    norm_types = (nn.LayerNorm, nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)

    def exclude_from_wd(name, module):
        # Exempt by owning-module type or by any matching name token.
        if isinstance(module, norm_types):
            return True
        return any(token in name for token in no_wd_tokens)

    if not isinstance(models, list):
        models = [models]

    decayed, undecayed = [], []
    for model in models:
        for mod_name, module in model.named_modules():
            # recurse=False: visit each parameter exactly once, at its owner.
            for p_name, param in module.named_parameters(recurse=False):
                if not param.requires_grad:
                    continue
                full_name = f"{mod_name}.{p_name}" if mod_name else p_name
                target = undecayed if exclude_from_wd(full_name, module) else decayed
                target.append(param)

    return [
        {'params': decayed, 'weight_decay': weight_decay, 'name': 'wd'},
        {'params': undecayed, 'weight_decay': 0.0, 'name': 'no wd'}
    ]


def create_optimizer_and_scheduler(models, optimizer_params, num_batches_per_epoch):
    """Build an optimizer and an optional per-step scheduler from a config dict.

    The first entry in optimizer_params['optimizer'] (resp. ['scheduler'])
    whose config has use=True (default True when absent) is selected.

    Args:
        models: module or list of modules whose parameters are optimized.
        optimizer_params (dict): config with 'lr', 'weight_decay', and
            'optimizer' / 'scheduler' sub-dicts keyed by name.
        num_batches_per_epoch (int): converts epoch-based scheduler settings
            into step counts (schedulers are stepped per batch).

    Returns:
        (optimizer, scheduler_dict | None) where scheduler_dict follows the
        Lightning convention: {'scheduler', 'name', 'interval': 'step'}.

    Raises:
        ValueError: no optimizer enabled, or an unknown optimizer/scheduler name.
    """
    def first_enabled(section):
        # First config whose 'use' flag is truthy (missing flag counts as True).
        for entry_name, config in section.items():
            if config.get('use', True):
                return entry_name
        return None

    optimizer_name = first_enabled(optimizer_params['optimizer'])
    if optimizer_name is None:
        raise ValueError("Configuration error: No optimizer is marked for use.")

    scheduler_name = first_enabled(optimizer_params['scheduler'])

    param_groups = define_param_groups(models, optimizer_params['weight_decay'])

    optimizer_classes = {'adamw': AdamW, 'lars': LARS, 'adam': Adam, 'sgd': SGD}
    if optimizer_name not in optimizer_classes:
        raise ValueError(f"Optimizer '{optimizer_name}' is not supported.")

    # Strip the selection flag; everything else is forwarded as kwargs.
    opt_kwargs = {
        k: v
        for k, v in optimizer_params['optimizer'][optimizer_name].items()
        if k != 'use'
    }
    optimizer = optimizer_classes[optimizer_name](
        param_groups, lr=optimizer_params['lr'], **opt_kwargs
    )

    if scheduler_name is None:
        return optimizer, None

    sched_cfg = optimizer_params['scheduler']
    if scheduler_name == 'cosine':
        sched = CosineAnnealingLR(
            optimizer,
            T_max=num_batches_per_epoch * sched_cfg['cosine']['T_max'],
            eta_min=sched_cfg['cosine'].get('eta_min', 0.01),
        )
    elif scheduler_name == 'step':
        sched = StepLR(
            optimizer,
            step_size=num_batches_per_epoch * sched_cfg['step']['step_size'],
            gamma=sched_cfg['step'].get('gamma', 0.1),
        )
    elif scheduler_name == 'exponential':
        sched = ExponentialLR(
            optimizer,
            gamma=sched_cfg['exponential']['gamma'],
        )
    elif scheduler_name == 'warmup_cosine':
        sched = WarmupCosineStepScheduler(
            optimizer,
            min_lr_factor=sched_cfg['warmup_cosine']['min_lr_factor'],
            warmup_steps=num_batches_per_epoch * sched_cfg['warmup_cosine']['warmup_steps'],
            total_steps=num_batches_per_epoch * sched_cfg['warmup_cosine']['total_steps'],
        )
    else:
        raise ValueError(f"Scheduler '{scheduler_name}' is not supported.")

    scheduler = {
        'scheduler': sched,
        'name': f"{scheduler_name}",
        'interval': 'step',
    }
    return optimizer, scheduler

def define_param_groups_multiple_lr(model_group_dict, weight_decay_dict, lr_rates):
    """Build optimizer param groups with a distinct LR/decay per model group.

    For each key in `lr_rates` two groups are emitted — '<key>/wd' and
    '<key>/no_wd' — where the no_wd group (biases, norms, embeddings, ...)
    gets zero weight decay.

    Args:
        model_group_dict: mapping group key -> nn.Module or list of modules;
            keys must also exist in `lr_rates` / `weight_decay_dict`.
        weight_decay_dict: mapping group key -> decay for its 'wd' group.
        lr_rates: mapping group key -> learning rate for both its groups.

    Returns:
        list[dict]: optimizer param-group dicts with 'params', 'weight_decay',
        'lr', and a 'name' like '<key>_wd_params'.
    """
    no_wd_tokens = (
        'bias', 'layernorm', 'position_embeddings', 'mask_token', 'cls_token',
        'log_temperature', 'projection_head', 'linear_probe_layer',
        'linear_layer_reduction', 'attention_pooling', 'layerscale', 'bn',
    )
    norm_types = (nn.LayerNorm, nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)

    def exclude_from_wd(name, module):
        if isinstance(module, norm_types):
            return True
        return any(token in name for token in no_wd_tokens)

    # One wd and one no_wd bucket per LR key, in the same order as lr_rates.
    buckets = {f'{key}/{suffix}': [] for key in lr_rates for suffix in ('wd', 'no_wd')}

    for group_key, models in model_group_dict.items():
        models = models if isinstance(models, list) else [models]
        for model in models:
            modules_by_name = dict(model.named_modules())
            for name, param in model.named_parameters():
                if not param.requires_grad:
                    continue
                # Resolve the module that owns this parameter (fall back to
                # the model itself for top-level parameters).
                owner = modules_by_name.get(name.rsplit('.', 1)[0], model)
                suffix = 'no_wd' if exclude_from_wd(name, owner) else 'wd'
                buckets[f'{group_key}/{suffix}'].append(param)

    param_groups_list = []
    for key, params in buckets.items():
        prefix, suffix = key.split('/')
        param_groups_list.append({
            'params': params,
            'weight_decay': 0.0 if suffix == 'no_wd' else weight_decay_dict.get(prefix),
            'lr': lr_rates[prefix],
            'name': f"{prefix}_{suffix}_params",
        })
    return param_groups_list


def create_optimizer_and_scheduler_multiple_lr(model_group_dict, optimizer_params, num_batches_per_epoch):
    """Build an optimizer with per-encoder LRs/decays plus an optional scheduler.

    Like create_optimizer_and_scheduler, but parameters are bucketed per model
    group ('lr' head, 'imag_encoder', 'ecg_encoder'), each with its own
    learning rate and weight decay taken from `optimizer_params`. Since every
    param group carries its own 'lr', no global lr is passed to the optimizer.

    Args:
        model_group_dict: mapping group key -> module(s); keys must match the
            lr/weight-decay entries below.
        optimizer_params (dict): config with per-group LRs and decays plus
            'optimizer' / 'scheduler' sub-dicts keyed by name.
        num_batches_per_epoch (int): converts epoch counts to step counts.

    Returns:
        (optimizer, scheduler_dict | None) with the Lightning-style
        {'scheduler', 'name', 'interval': 'step'} dict.

    Raises:
        ValueError: no optimizer enabled, or an unknown optimizer/scheduler name.
    """
    lr_rates = {
        'lr': optimizer_params['lr'],
        'imag_encoder': optimizer_params['lr_image_encoder'],
        'ecg_encoder': optimizer_params['lr_ecg_encoder']
    }

    weight_decay_dict = {
        'lr': optimizer_params['weight_decay'],
        'imag_encoder': optimizer_params['weight_decay_image_encoder'],
        'ecg_encoder': optimizer_params['weight_decay_ecg_encoder']
    }

    def first_enabled(section):
        # First config whose 'use' flag is truthy (missing flag counts as True).
        for entry_name, config in section.items():
            if config.get('use', True):
                return entry_name
        return None

    optimizer_name = first_enabled(optimizer_params['optimizer'])
    if optimizer_name is None:
        raise ValueError("Configuration error: No optimizer is marked for use.")

    scheduler_name = first_enabled(optimizer_params['scheduler'])

    param_groups = define_param_groups_multiple_lr(
        model_group_dict=model_group_dict,
        weight_decay_dict=weight_decay_dict,
        lr_rates=lr_rates,
    )

    optimizer_classes = {'adamw': AdamW, 'lars': LARS, 'adam': Adam, 'sgd': SGD}
    if optimizer_name not in optimizer_classes:
        raise ValueError(f"Optimizer '{optimizer_name}' is not supported.")

    opt_kwargs = {
        k: v
        for k, v in optimizer_params['optimizer'][optimizer_name].items()
        if k != 'use'
    }
    optimizer = optimizer_classes[optimizer_name](param_groups, **opt_kwargs)

    if scheduler_name is None:
        return optimizer, None

    sched_cfg = optimizer_params['scheduler']
    if scheduler_name == 'cosine':
        sched = CosineAnnealingLR(
            optimizer,
            T_max=num_batches_per_epoch * sched_cfg['cosine']['T_max'],
            eta_min=sched_cfg['cosine'].get('eta_min', 0.01),
        )
    elif scheduler_name == 'step':
        sched = StepLR(
            optimizer,
            step_size=num_batches_per_epoch * sched_cfg['step']['step_size'],
            gamma=sched_cfg['step'].get('gamma', 0.1),
        )
    elif scheduler_name == 'exponential':
        sched = ExponentialLR(
            optimizer,
            gamma=sched_cfg['exponential']['gamma'],
        )
    elif scheduler_name == 'warmup_cosine':
        sched = WarmupCosineStepScheduler(
            optimizer,
            min_lr_factor=sched_cfg['warmup_cosine']['min_lr_factor'],
            warmup_steps=num_batches_per_epoch * sched_cfg['warmup_cosine']['warmup_steps'],
            total_steps=num_batches_per_epoch * sched_cfg['warmup_cosine']['total_steps'],
        )
    else:
        raise ValueError(f"Scheduler '{scheduler_name}' is not supported.")

    scheduler = {
        'scheduler': sched,
        'name': f"{scheduler_name}",
        'interval': 'step',
    }
    return optimizer, scheduler


class LARS(Optimizer):
    r"""Implements LARS (Layer-wise Adaptive Rate Scaling).
    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float): learning rate
        momentum (float, optional): momentum factor (default: 0)
        eta (float, optional): LARS coefficient as used in the paper (default: 1e-3)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        dampening (float, optional): dampening for momentum (default: 0)
        nesterov (bool, optional): enables Nesterov momentum (default: False)
        epsilon (float, optional): epsilon to prevent zero division (default: 0)
    Example:
        >>> optimizer = torch.optim.LARS(model.parameters(), lr=0.1, momentum=0.9)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()
    """
    def __init__(self, params, lr=required, momentum=0, eta=1e-3, dampening=0,
                 weight_decay=0, nesterov=False, epsilon=0):
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))

        defaults = dict(lr=lr, momentum=momentum, eta=eta, dampening=dampening,
                        weight_decay=weight_decay, nesterov=nesterov, epsilon=epsilon)
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
        super(LARS, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(LARS, self).__setstate__(state)
        for group in self.param_groups:
            # Checkpoints saved before 'nesterov' existed may lack the key.
            group.setdefault('nesterov', False)

    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            eta = group['eta']
            dampening = group['dampening']
            nesterov = group['nesterov']
            epsilon = group['epsilon']

            for p in group['params']:
                if p.grad is None:
                    continue
                w_norm = torch.norm(p.data)
                g_norm = torch.norm(p.grad.data)
                # Layer-wise trust ratio; fall back to 1 when either norm is 0.
                if w_norm * g_norm > 0:
                    local_lr = eta * w_norm / (g_norm +
                        weight_decay * w_norm + epsilon)
                else:
                    local_lr = 1
                d_p = p.grad.data
                if weight_decay != 0:
                    # Out-of-place add: the original used d_p.add_(...), which
                    # mutated p.grad in place and corrupted gradients for any
                    # later consumer (clipping, logging, other optimizers).
                    d_p = d_p.add(p.data, alpha=weight_decay)
                if momentum != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
                    else:
                        buf = param_state['momentum_buffer']
                    buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
                    if nesterov:
                        # Fixed: d_p.add(momentum, buf) used the removed
                        # positional-alpha overload (TypeError on modern torch).
                        d_p = d_p.add(buf, alpha=momentum)
                    else:
                        d_p = buf

                p.data.add_(d_p, alpha=-local_lr * group['lr'])

        return loss
    
def compute_mean_stds(numpy_array):
    """Return per-column means and (population) standard deviations."""
    return numpy_array.mean(axis=0), numpy_array.std(axis=0)

def filter_ed_labels(labels):
    """Return the indices of labels whose text contains the substring "ED"."""
    hits = []
    for position, label in enumerate(labels):
        if "ED" in label:
            hits.append(position)
    return hits


class Plotter:
    """Scatter-plots predictions against ground truth and logs them to wandb.

    Args:
        units: unit string per target (parallel to labels_names).
        labels_names: display name per target.
    """

    def __init__(self, units: list, labels_names: list) -> None:
        self.units = units
        self.labels_names = labels_names

    def plot_results(self, preds: torch.Tensor, labels: torch.Tensor, prefix: str = "val_plot"):
        """Log one predicted-vs-actual scatter plot per target column.

        preds/labels are [N, num_targets]; column idx corresponds to
        labels_names[idx]. A dashed identity line marks perfect prediction.
        """
        preds_np = preds.cpu().numpy()
        labels_np = labels.cpu().numpy()

        for idx, name in enumerate(self.labels_names):
            unit = self.units[idx]
            actual = labels_np[:, idx]
            predicted = preds_np[:, idx]

            plt.figure(figsize=(6, 6))
            plt.scatter(actual, predicted, alpha=0.5)
            plt.xlabel(f'Actual {name} ({unit})')
            plt.ylabel(f'Predicted {name} ({unit})')

            if prefix == "val_plot":
                plt.title(f'Prediction vs Actual for {name}')
            elif prefix == "best_val_plot":
                plt.title(f'Best Model - Prediction vs Actual for {name}')

            lo, hi = actual.min(), actual.max()
            plt.plot([lo, hi], [lo, hi], 'r--')  # identity reference line
            plt.grid(True)

            wandb.log({f"{prefix}/{name}": wandb.Image(plt.gcf())})
            plt.close()


def plot_epoch_heatmap(avg_similarities_epoch, stage='train', heatmap_type='time_step'):
    """Render an epoch-averaged cosine-similarity heatmap and log it to wandb.

    :param avg_similarities_epoch: 2D CPU tensor of averaged similarities
    :param stage: e.g. 'train' or 'val', used in titles and wandb keys
    :param heatmap_type: 'time_step', 'time_step_proj_tokens', or 'sample'
    :raises ValueError: on an unrecognized heatmap_type
    """
    # (title, xlabel, ylabel, wandb key) per supported heatmap type.
    layouts = {
        'time_step': (
            f'Average Cosine Similarity Between Time Steps For Non Projected ({stage.capitalize()})',
            'Time Steps in Modality B (MRI)',
            'Time Steps in Modality A (ECG)',
            f"{stage}_epoch_time_similarities_non_proj",
        ),
        'time_step_proj_tokens': (
            f'Average Cosine Similarity Between Time Steps For Projected Tokens({stage.capitalize()})',
            'Time Steps in Modality B (MRI)',
            'Time Steps in Modality A (ECG)',
            f"{stage}_epoch_time_similarities_proj",
        ),
        'sample': (
            f'Average Cosine Similarity Between Samples ({stage.capitalize()})',
            'Samples in Modality B (MRI)',
            'Samples in Modality A (ECG)',
            f"{stage}_epoch_sample_similarities",
        ),
    }

    plt.figure(figsize=(8, 6))
    ax = sns.heatmap(avg_similarities_epoch.numpy(), cmap='viridis')
    ax.invert_yaxis()  # put index 0 at the bottom-left

    if heatmap_type not in layouts:
        raise ValueError("Invalid heatmap_type specified: choose 'time_step' or 'sample'")

    title, xlabel, ylabel, wandb_key = layouts[heatmap_type]
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    wandb.log({wandb_key: wandb.Image(plt.gcf())})
    plt.close()


def compute_batch_similarity(z1, z2):
    """Batch-averaged time-step cosine similarity between two embedding sets.

    :param z1: [N, T, D] embeddings
    :param z2: [N, T, D] embeddings
    :return: [T, T] detached CPU tensor; entry [t, k] is the cosine
             similarity between z1 at time t and z2 at time k, averaged over N
    """
    a = F.normalize(z1, dim=-1)
    b = F.normalize(z2, dim=-1)

    # Batched matmul gives the per-sample [T, T] similarity matrices;
    # equivalent to einsum('ntd,nkd->ntk', a, b).
    per_sample = torch.bmm(a, b.transpose(1, 2))

    return per_sample.mean(dim=0).float().detach().cpu()

def compute_cross_batch_similarity(z1, z2):
    """
    All-pairs (patient x patient) time-step cosine similarities.

    Args:
        z1 (torch.Tensor): [N, T, D] embeddings for ECG.
        z2 (torch.Tensor): [N, T, D] embeddings for MRI.

    Returns:
        torch.Tensor: [N, N, T, T] detached CPU tensor; entry [n, m, t, k] is
        the cosine similarity between patient n's ECG at time t and patient
        m's MRI at time k (the n == m diagonal gives same-patient pairs).
    """
    ecg = F.normalize(z1, dim=-1)  # [N, T, D]
    mri = F.normalize(z2, dim=-1)  # [N, T, D]

    # Broadcasted matmul: [N, 1, T, D] @ [1, N, D, T] -> [N, N, T, T];
    # equivalent to einsum('ntd,mkd->nmtk', ecg, mri).
    sims = torch.matmul(ecg.unsqueeze(1), mri.transpose(1, 2).unsqueeze(0))

    return sims.detach().cpu()


def compute_sample_similarity(z1, z2):
    """Pairwise cosine similarity between two sets of sample embeddings.

    :param z1: [N, D] embeddings
    :param z2: [N, D] embeddings
    :return: [N, N] detached CPU float tensor of cosine similarities
    """
    sims = F.normalize(z1, dim=-1) @ F.normalize(z2, dim=-1).T
    return sims.float().detach().cpu()

def plot_similarity_vs_time_difference(avg_similarities_epoch, stage='train', type='non_proj', log_wandb=True):
    """
    Plot average cosine similarity as a function of the time-step distance |t - t'|.

    :param avg_similarities_epoch: [T, T] tensor of epoch-averaged similarities
    :param stage: 'train' or 'val', used in the title and wandb key
    :param type: 'non_proj' or 'proj' (NOTE: shadows the builtin; kept for API compatibility)
    :param log_wandb: when True, log the figure to wandb
    """
    T = avg_similarities_epoch.shape[0]
    idx = torch.arange(T)
    time_diffs = (idx.unsqueeze(0) - idx.unsqueeze(1)).abs()  # [T, T] distances

    # Mean similarity over all (t, t') pairs at each distance d = 0..T-1.
    per_diff = [
        avg_similarities_epoch[time_diffs == d].mean().item()
        for d in range(T)
    ]

    plt.figure(figsize=(8, 6))
    plt.plot(range(T), per_diff, marker='o')

    token_kind = {'non_proj': 'Non Projected', 'proj': 'Projected'}.get(type)
    if token_kind is not None:
        plt.title(f'Cosine Similarity vs Time Step Difference ({stage.capitalize()}) for {token_kind} Tokens')
    plt.xlabel('Time Step Difference |t - t\'|')
    plt.ylabel('Average Cosine Similarity')
    plt.grid(True)

    if log_wandb and type in ('non_proj', 'proj'):
        wandb.log({f"{stage}_cosine_similarity_vs_time_difference_{type}": wandb.Image(plt.gcf())})
    plt.close()
