import math
import numpy as np
import torch.nn as nn

from scipy.stats import pearsonr
from torch.optim.lr_scheduler import LambdaLR


def r2_score_np(y_true, y_pred):
    """
    Compute the coefficient of determination (R^2) per output dimension.

    y_true, y_pred => shape [N, dim]
    Returns an array of length 'dim' with R^2 per dimension.
    Note: a dimension with zero variance in y_true yields a division by zero
    (inf/nan), mirroring the plain R^2 formula.
    """
    centered = y_true - np.mean(y_true, axis=0)
    ss_tot = np.sum(centered ** 2, axis=0)
    ss_res = np.sum((y_true - y_pred) ** 2, axis=0)
    return 1 - ss_res / ss_tot

def correlation_with_significance(y_true, y_pred):
    """
    Pearson correlation per output dimension, with significance statistics.

    y_true, y_pred => shape [N, dim]
    Returns four lists of length 'dim': correlation coefficients, p-values,
    and the lower/upper bounds of the confidence interval per dimension.
    """
    rows = []
    for dim in range(y_true.shape[1]):
        result = pearsonr(y_pred[:, dim], y_true[:, dim])
        interval = result.confidence_interval()
        rows.append((result.statistic, result.pvalue, interval.low, interval.high))

    # No dimensions: keep the original contract of four empty lists.
    if not rows:
        return [], [], [], []

    corr, p_values, ci_lower, ci_upper = (list(col) for col in zip(*rows))
    return corr, p_values, ci_lower, ci_upper

def cosine_with_warmup_scheduler(optimizer, warmup_epochs, total_epochs, base_lr, min_lr):
    """
    Returns a LambdaLR scheduler: linear warmup for `warmup_epochs`, then
    cosine annealing from `base_lr` down to `min_lr` by `total_epochs`.

    The multiplier returned by the inner lambda is relative to the
    optimizer's configured lr, so `base_lr` should match that lr (and must
    be non-zero).

    Fixes over the original:
    - guards the cosine denominator so `total_epochs == warmup_epochs`
      no longer raises ZeroDivisionError;
    - clamps progress at 1.0 so stepping past `total_epochs` holds the lr
      at `min_lr` instead of climbing back up the cosine curve.
    """
    def lr_lambda(epoch):
        if epoch < warmup_epochs:
            # Linear ramp 0 -> 1 across the warmup phase.
            return epoch / warmup_epochs
        # Guard: with total_epochs == warmup_epochs the span would be 0.
        span = max(total_epochs - warmup_epochs, 1)
        # Clamp so epochs beyond total_epochs stay at min_lr.
        progress = min((epoch - warmup_epochs) / span, 1.0)
        cosine_decay = 0.5 * (1 + math.cos(math.pi * progress))
        return (min_lr + (base_lr - min_lr) * cosine_decay) / base_lr
    return LambdaLR(optimizer, lr_lambda)
    
class AttentionPool(nn.Module):
    def __init__(self, embedding_dim, fc_norm=True):
        super(AttentionPool, self).__init__()
        self.attention = nn.MultiheadAttention(embed_dim=embedding_dim, num_heads=8, batch_first=True)
        self.fc_norm = nn.LayerNorm(embedding_dim) if fc_norm else nn.Identity()

    def forward(self, x):
        # Extract q, k, v without cls token
        q = x[:, 1:, :].mean(dim=1, keepdim=True)  # [B, 1, D]
        k = x[:, 1:, :]  # [B, T-1, D]
        v = x[:, 1:, :]  # [B, T-1, D]

        # Apply attention pooling
        x, _ = self.attention(q, k, v)

        # Apply normalization
        outcome = self.fc_norm(x.squeeze(dim=1))  # [B, D]
        return outcome
    
class RegressionHead(nn.Module):
    """Single linear projection mapping embeddings to regression targets."""

    def __init__(self, input_dim=768, output_dim=10):
        super().__init__()
        self.linear = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        # x: [batch_size, input_dim] -> [batch_size, output_dim]
        out = self.linear(x)
        return out
    
class ClassificationHead(nn.Module):
    """Single linear projection producing per-class logits."""

    def __init__(self, input_dim=768, num_classes=4):
        super().__init__()
        self.linear = nn.Linear(input_dim, num_classes)

    def forward(self, x):
        # x: [batch_size, input_dim] -> logits [batch_size, num_classes]
        logits = self.linear(x)
        return logits
    
    

if __name__ == "__main__":
    # Ad-hoc experiment driver: loads ECG datasets and a pretrained ViT-MAE
    # encoder, then runs sklearn-based regression on the extracted features.
    # Most alternative experiments (fine-tuning, grid search, classification)
    # are kept below as commented-out invocations.
    import os
    import numpy as np  # NOTE(review): duplicates the top-level numpy import
    from ecgcmr.utils.misc import fix_seed

    from torch.utils.data import DataLoader
    from ecgcmr.signal.sig_datasets.DownstreamECGDataset import DownstreamECGDatasetEvaluation
    from ecgcmr.multimodal.multimodal_utils.misc import downstream_collate_fn
    from transformers import ViTMAEConfig, ViTMAEModel

    from ecgcmr.signal.inference_ecg import (
        train_regression_ecg,
        train_ecg_regression_supervised,
        sklearn_regression,
        train_fine_tuning_with_grid_search,
        grid_search_regression_ecg,
        train_logistic_regression,
        train_classification
    )
    
    from ecgcmr.imaging.inference_imaging import train_mri_regression_supervised, train_regression_mri
    
    from hydra import initialize, compose
    
    # Fix RNG seeds for reproducibility of dataset shuffling / model init.
    fix_seed(987612)
    
    batch_size = 16

    # Initialize Hydra with the correct relative path
    with initialize(config_path="../conf", version_base=None):
        cfg = compose('base')
        cfg.dataset.batch_size = batch_size
        
    
    # ICD-10 code groups per disease label (used by the classification
    # experiments below); several groups are currently disabled.
    disease_map = {
        "MI": ["I21"],
        "CM": ["I42"],
        # "MD" : ["I40", "I43"],
        # "IHD": ["I21", "I25"],
        # "VHD": ["I34", "I35", "I36", "I37"],
        # "PD": ["I30", "I31"],
        "AF": ["I48"],
        "HF": ["I50"],
        # "CAD": ["I20", "I21", "I22", "I23", "I24", "I25"],
    }
    
    from ecgcmr.signal.sig_datasets.DownstreamECGDataset import DownstreamECGDataset
    
    # Augmentations disabled for both splits: features are extracted, not trained on.
    dat_train = DownstreamECGDataset(cfg=cfg, mode='train', apply_augmentations=False) # DownstreamECGDatasetEvaluation(cfg, 'train', disease_map=disease_map, apply_augmentations=False)
    dat_val = DownstreamECGDataset(cfg=cfg, mode='val', apply_augmentations=False) # DownstreamECGDatasetEvaluation(cfg, 'val', disease_map=disease_map, apply_augmentations=False)
    
    # d_train = DataLoader(
    #     dat_train,
    #     batch_size=cfg.dataset.batch_size,
    #     shuffle=True,
    #     num_workers=4,
    #     drop_last=False,
    #     pin_memory=True,
    #     collate_fn=downstream_collate_fn,
    # )

    # d_val = DataLoader(
    #     dat_val,
    #     batch_size=cfg.dataset.batch_size,
    #     shuffle=False,
    #     num_workers=4,
    #     drop_last=False,
    #     pin_memory=True,
    #     collate_fn=downstream_collate_fn,
    # )
    
    # Default-collated loaders (the custom collate_fn variant is disabled above).
    d_train = DataLoader(
        dat_train,
        batch_size=cfg.dataset.batch_size,
        shuffle=True,
        num_workers=4,
        pin_memory=True,
    )

    d_val = DataLoader(
        dat_val,
        batch_size=cfg.dataset.batch_size,
        shuffle=False,
        num_workers=4,
        pin_memory=True,
    )
    
    # Load the model
    from transformers import ViTMAEConfig, ViTMAEModel  # NOTE(review): duplicate of the import above
    
    # ecg_encoder_config = ViTMAEConfig(
    #     hidden_size=512,
    #     num_hidden_layers=6,
    #     num_attention_heads=8,
    #     intermediate_size=2048,
    #     hidden_act="gelu",
    #     hidden_dropout_prob=0.1,
    #     attention_probs_dropout_prob=0.1,
    #     initializer_range=0.02,
    #     layer_norm_eps=1e-12,
    #     qkv_bias=True,
    #     image_size=(12, 2500),
    #     patch_size=(1, 50),
    #     num_channels=1,
    #     mask_ratio=0.0,
    #     mask_loss=False,
    #     use_cls_token=True,
    #     use_learnable_pos_emb=True,
    # )

    # ecg_encoder = ViTMAEModel(config=ecg_encoder_config)
    
    # NOTE(review): placeholder — must be replaced with a real checkpoint
    # path/identifier before this script can run.
    model_path = "XXXXX"
    
    ecg_encoder_config = ViTMAEConfig.from_pretrained(model_path)
    ecg_encoder = ViTMAEModel.from_pretrained(model_path, config=ecg_encoder_config)

    # print(ecg_encoder)
    
    # epochs = 20
    # train_fine_tuning_with_grid_search(
    #     train_loader=d_train,
    #     val_loader=d_val,
    #     path_ecg_checkpoint=model_path,
    #     device="cuda",
    #     epochs=epochs,
    # )
    
    # epochs = 120
    # train_regression_ecg(ecg_encoder,
    #                      RegressionHead(input_dim=512, output_dim=10),
    #                      d_train, d_val,
    #                      device="cuda", epochs=epochs)

    # epochs = 120
    # train_ecg_regression_supervised(ecg_encoder,
    #                                 RegressionHead(input_dim=512, output_dim=10),
    #                                 d_train, d_val,
    #                                 device="cuda", epochs=epochs)

    # Active experiment: sklearn regression on encoder features, with label
    # normalization statistics loaded from paths in the Hydra config.
    result = sklearn_regression(
        ecg_model=ecg_encoder,
        train_loader=d_train,
        val_loader=d_val,
        mean_train_labels_vol=np.load(cfg.downstream_task.paths.mean_train_labels_vol),
        std_train_labels_vol=np.load(cfg.downstream_task.paths.std_train_labels_vol),
    )
    
    # train_disease_np = dat_train.disease_labels
    # counts_ones = train_disease_np.sum(axis=0)
    # counts_zeros = train_disease_np.shape[0] - counts_ones
    # pos_weight_np = counts_zeros / np.maximum(counts_ones, 1)
    # pos_weight_tensor = torch.from_numpy(pos_weight_np).float()
    

    # # # cls_head = ClassificationHead(input_dim=512, num_classes=len(disease_map))
    
    # train_logistic_regression(
    #     ecg_model=ecg_encoder,
    #     train_loader=d_train,
    #     val_loader=d_val,
    #     device="cuda",
    #     pos_weight_tensor=pos_weight_tensor,
    #     disease_names=disease_map.keys(),
    # )
    
    # train_classification(
    #     ecg_model=ecg_encoder,
    #     cls_head=cls_head,
    #     train_loader=d_train,
    #     val_loader=d_val,
    #     device="cuda",
    #     epochs=20,
    #     pos_weight_tensor=pos_weight_tensor,
    #     disease_names=["MI", "CM", "AF", "HF"],
    #     threshold=0.5,
    #     lr=3e-5
    # )
    
    # results = grid_search_regression_ecg(
    #     ecg_model_path=model_path,
    #     train_loader=d_train,
    #     val_loader=d_val,
    #     device="cuda",
    #     epochs=30,  
    #     lr_values=[1e-5, 3e-5, 1e-4],
    #     wd_values=[1e-6],
    # )
    
    # print("\nAll grid search results:")
    # for (lr, wd), val_loss in results.items():
    #     print(f"LR={lr}, WD={wd} => ValLoss={val_loss:.4f}")