import torch

import torch.optim as optim
import torch.nn as nn
from tqdm import tqdm

from ecgcmr.utils.train_evaluate import r2_score_np, correlation_with_significance, cosine_with_warmup_scheduler


def train_mri_regression_supervised(mri_encoder, reg_head, train_loader, val_loader, device="cuda", epochs=10):
    """
    Fine-tune the MRI encoder end-to-end together with a regression head.

    Unlike the linear-probe variant (`train_regression_mri`), ALL encoder
    parameters are unfrozen and optimized jointly with the head.

    Args:
        mri_encoder: MRI encoder; must accept ``pixel_values`` and return an
            object exposing ``.last_hidden_state``. Token 0 is dropped before
            pooling — assumes a ViT-style leading class token (TODO confirm
            against the encoder implementation).
        reg_head: A small linear model or MLP mapping pooled embeddings to
            the regression targets.
        train_loader, val_loader: DataLoaders returning (mri, task_labels).
        device: Device string the models and batches are moved to.
        epochs: Number of training epochs.
    """
    # Unfreeze every encoder parameter: this is full fine-tuning,
    # not a frozen-backbone linear probe.
    for param in mri_encoder.parameters():
        param.requires_grad = True

    mri_encoder = mri_encoder.to(device)
    reg_head = reg_head.to(device)

    criterion = nn.MSELoss()

    base_lr = 3e-5
    warmup_epochs = 1
    min_lr = 1e-7

    # Both encoder and head parameters are optimized jointly.
    optimizer = optim.Adam(
        list(mri_encoder.parameters()) + list(reg_head.parameters()),
        lr=base_lr,
        weight_decay=1e-6,
    )

    # Warmup then cosine decay toward `min_lr`; stepped once per epoch below.
    scheduler = cosine_with_warmup_scheduler(optimizer, warmup_epochs, epochs, base_lr, min_lr)

    for epoch in range(epochs):
        print(f"=== Regression Epoch {epoch+1}/{epochs} ===")

        reg_head.train()
        mri_encoder.train()

        total_train_loss = 0

        for batch in tqdm(train_loader, desc="Training"):
            mri_data = batch[0].to(device)
            task_labels = batch[1].to(device)

            optimizer.zero_grad()

            mri_output = mri_encoder(pixel_values=mri_data, apply_masking=False, use_layernorm=True).last_hidden_state
            # Drop the leading token, then mean-pool the remaining tokens
            # into one global embedding per sample.
            mri_embeddings = mri_output[:, 1:]
            global_embeddings = torch.mean(mri_embeddings, dim=1)  # [B, D]

            preds = reg_head(global_embeddings)
            loss = criterion(preds, task_labels)
            loss.backward()
            optimizer.step()

            # Weight by batch size so the epoch average is per-sample even
            # when the final batch is smaller.
            total_train_loss += loss.item() * mri_data.size(0)

        avg_train_loss = total_train_loss / len(train_loader.dataset)

        reg_head.eval()
        mri_encoder.eval()

        total_val_loss = 0
        all_preds, all_targets = [], []
        with torch.no_grad():
            for batch in tqdm(val_loader, desc="Validation"):
                mri_data = batch[0].to(device)
                task_labels = batch[1].to(device)

                mri_output = mri_encoder(pixel_values=mri_data, apply_masking=False, use_layernorm=True).last_hidden_state
                mri_embeddings = mri_output[:, 1:]
                global_embeddings = torch.mean(mri_embeddings, dim=1)  # [B, D]

                preds = reg_head(global_embeddings)
                loss = criterion(preds, task_labels)
                total_val_loss += loss.item() * mri_data.size(0)

                all_preds.append(preds.cpu())
                all_targets.append(task_labels.cpu())

        avg_val_loss = total_val_loss / len(val_loader.dataset)
        print(f"TrainLoss={avg_train_loss:.4f}, ValLoss={avg_val_loss:.4f}")

        # Per-dimension R^2 and correlation diagnostics on the full
        # validation set.
        all_preds = torch.cat(all_preds, dim=0)     # [N_val, num_targets]
        all_targets = torch.cat(all_targets, dim=0)  # [N_val, num_targets]

        preds_np = all_preds.numpy()
        targets_np = all_targets.numpy()
        r2 = r2_score_np(targets_np, preds_np)
        corr, p_values, ci_lower, ci_upper = correlation_with_significance(targets_np, preds_np)

        # Star markers for (deliberately stringent) p-value thresholds.
        significance_levels = [
            "***" if p < 1e-10 else "**" if p < 1e-8 else "*" if p < 1e-6 else ""
            for p in p_values
        ]

        print("R^2 per dimension:", r2)
        for i, (c, p, sig, lo, hi) in enumerate(zip(corr, p_values, significance_levels, ci_lower, ci_upper)):
            print(f"Dimension {i+1}: Correlation={c:.3f}, p-value={p:.6e} {sig}, CI=[{lo:.3f}, {hi:.3f}]")

        # Advance the warmup/cosine schedule once per epoch.
        scheduler.step()
        
# LINEAR PROBE of MRI model: encoder stays frozen, only the regression head is trained
def train_regression_mri(mri_model, reg_head, train_loader, val_loader, device="cuda", epochs=10):
    """
    Linear-probe training: fit a regression head on top of a frozen MRI encoder.

    Only ``reg_head`` receives gradient updates; the encoder is frozen and
    kept in eval mode throughout.

    Args:
        mri_model: The frozen MRI encoder; must accept ``pixel_values`` and
            return an object exposing ``.last_hidden_state``. Token 0 is
            dropped before pooling — assumes a ViT-style leading class token
            (TODO confirm against the encoder implementation).
        reg_head: A small linear model or MLP mapping pooled embeddings to
            the regression targets.
        train_loader, val_loader: DataLoaders returning (mri, task_labels).
        device: Device string the models and batches are moved to.
        epochs: Number of training epochs.
    """
    # Freeze the backbone so only the head is trained.
    for param in mri_model.parameters():
        param.requires_grad = False

    mri_model = mri_model.to(device)
    reg_head = reg_head.to(device)

    criterion = nn.MSELoss()

    # Only the head's parameters are handed to the optimizer.
    optimizer = optim.AdamW(
        list(reg_head.parameters()),
        lr=3e-4,
        weight_decay=1e-6,
    )

    for epoch in range(epochs):
        print(f"=== Regression Epoch {epoch+1}/{epochs} ===")

        reg_head.train()
        # Keep the frozen encoder in eval mode (disables dropout/BN updates).
        mri_model.eval()

        total_train_loss = 0

        for batch in tqdm(train_loader, desc="Training"):
            mri_data = batch[0].to(device)
            task_labels = batch[1].to(device)

            optimizer.zero_grad()

            # No grad through the frozen encoder — saves memory and compute.
            with torch.no_grad():
                mri_output = mri_model(pixel_values=mri_data, apply_masking=False, use_layernorm=False).last_hidden_state
                # Drop the leading token, then mean-pool the remaining tokens.
                mri_embeddings = mri_output[:, 1:]
                global_embeddings = torch.mean(mri_embeddings, dim=1)  # [B, D]

            preds = reg_head(global_embeddings)
            loss = criterion(preds, task_labels)
            loss.backward()
            optimizer.step()

            # Weight by batch size so the epoch average is per-sample even
            # when the final batch is smaller.
            total_train_loss += loss.item() * mri_data.size(0)

        avg_train_loss = total_train_loss / len(train_loader.dataset)

        reg_head.eval()
        mri_model.eval()

        total_val_loss = 0
        all_preds, all_targets = [], []
        with torch.no_grad():
            for batch in tqdm(val_loader, desc="Validation"):
                mri_data = batch[0].to(device)
                task_labels = batch[1].to(device)

                mri_output = mri_model(pixel_values=mri_data, apply_masking=False, use_layernorm=False).last_hidden_state
                mri_embeddings = mri_output[:, 1:]
                global_embeddings = torch.mean(mri_embeddings, dim=1)  # [B, D]

                preds = reg_head(global_embeddings)
                loss = criterion(preds, task_labels)
                total_val_loss += loss.item() * mri_data.size(0)

                all_preds.append(preds.cpu())
                all_targets.append(task_labels.cpu())

        avg_val_loss = total_val_loss / len(val_loader.dataset)
        print(f"TrainLoss={avg_train_loss:.4f}, ValLoss={avg_val_loss:.4f}")

        # Per-dimension R^2 and correlation diagnostics on the full
        # validation set.
        all_preds = torch.cat(all_preds, dim=0)     # [N_val, num_targets]
        all_targets = torch.cat(all_targets, dim=0)  # [N_val, num_targets]

        preds_np = all_preds.numpy()
        targets_np = all_targets.numpy()
        r2 = r2_score_np(targets_np, preds_np)
        corr, p_values, ci_lower, ci_upper = correlation_with_significance(targets_np, preds_np)

        # Star markers for (deliberately stringent) p-value thresholds.
        significance_levels = [
            "***" if p < 1e-10 else "**" if p < 1e-8 else "*" if p < 1e-6 else ""
            for p in p_values
        ]

        print("R^2 per dimension:", r2)
        for i, (c, p, sig, lo, hi) in enumerate(zip(corr, p_values, significance_levels, ci_lower, ci_upper)):
            print(f"Dimension {i+1}: Correlation={c:.3f}, p-value={p:.6e} {sig}, CI=[{lo:.3f}, {hi:.3f}]")
