import wandb
import torch
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

from omegaconf import DictConfig
from lightning import Callback
from sklearn.metrics import r2_score
from sklearn.linear_model import LinearRegression, Ridge
from torchmetrics import Metric

from ecgcmr.utils.misc import filter_ed_labels


class LogCMRPhenoTypesGraph(Callback):
    """Log predicted-vs-actual scatter plots of CMR phenotypes to wandb.

    The train/val hooks retain only the *last* batch of each epoch and plot
    it at epoch end; the test hook accumulates every batch and plots the
    full test set. Predictions and labels are assumed to be normalized;
    they are denormalized with per-split mean/std statistics loaded from
    disk before plotting.
    """

    def __init__(self, cfg: DictConfig, mask_labels: bool, prefix: str = "val_plot") -> None:
        """
        Args:
            cfg: Hydra config. ``cfg.downstream_task`` supplies the task
                type ('vol' or 'area'), target names, units, and paths to
                the per-split normalization statistics.
            mask_labels: If True, keep only ED-frame labels (ResNet case).
            prefix: wandb section used for the validation plots.

        Raises:
            ValueError: If ``cfg.downstream_task.type`` is not 'vol'/'area'.
        """
        super().__init__()

        self.mask_labels = mask_labels
        self.type = cfg.downstream_task.type
        self.prefix = prefix

        self.last_val_batch = None
        self.last_train_batch = None

        self.all_test_preds = []
        self.all_test_labels = []

        if self.type == 'vol':
            target_names_full = cfg.downstream_task.target_vol
            units_names_full = cfg.downstream_task.units_vol
        elif self.type == 'area':
            target_names_full = cfg.downstream_task.target_area
            units_names_full = cfg.downstream_task.units_area
        else:
            # Previously an unknown type fell through to a NameError below.
            raise ValueError(f"Unsupported downstream task type: {self.type!r}")

        if self.mask_labels:
            # RESNET CASE - Filter only ED frame labels.
            self.ids_to_take = filter_ed_labels(target_names_full)
            self.labels_names = [target_names_full[i] for i in self.ids_to_take]
            self.units = [units_names_full[i] for i in self.ids_to_take]
        else:
            self.ids_to_take = list(range(len(target_names_full)))
            self.labels_names = target_names_full
            self.units = units_names_full

        # Per-split denormalization statistics, restricted to the kept labels.
        # Path attributes follow the pattern {mean,std}_{split}_labels_{type}.
        paths = cfg.downstream_task.paths
        for split in ('train', 'val', 'test'):
            mean_path = getattr(paths, f'mean_{split}_labels_{self.type}')
            std_path = getattr(paths, f'std_{split}_labels_{self.type}')
            setattr(self, f'means_{split}', np.load(mean_path, mmap_mode='r')[self.ids_to_take])
            setattr(self, f'stds_{split}', np.load(std_path, mmap_mode='r')[self.ids_to_take])

    def _log_scatter_plots(self, targets, predictions, means, stds, wandb_prefix):
        """Denormalize and log one pred-vs-actual scatter per phenotype.

        Args:
            targets: (N, num_labels) normalized ground-truth array.
            predictions: (N, num_labels) normalized prediction array.
            means, stds: per-label denormalization statistics.
            wandb_prefix: wandb section the plots are logged under.
        """
        recovered_predictions = predictions * stds + means
        recovered_targets = targets * stds + means

        for idx, name in enumerate(self.labels_names):
            plt.figure(figsize=(6, 6))
            plt.scatter(recovered_targets[:, idx], recovered_predictions[:, idx], alpha=0.5)
            plt.xlabel(f'Actual {name} ({self.units[idx]})')
            plt.ylabel(f'Predicted {name} ({self.units[idx]})')
            plt.title(f'Prediction vs Actual for {name}')
            # Identity line: perfect predictions fall on it.
            lo = recovered_targets[:, idx].min()
            hi = recovered_targets[:, idx].max()
            plt.plot([lo, hi], [lo, hi], 'r--')
            plt.grid(True)

            wandb.log({f"{wandb_prefix}/{name}": wandb.Image(plt.gcf())})
            plt.close()

    def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx=0):
        # Only the most recent batch of the epoch is retained for plotting.
        self.last_train_batch = {
            'pred': outputs['pred'].detach().cpu(),
            'label': outputs['label'].detach().cpu()
        }

    def on_train_epoch_end(self, trainer, pl_module) -> None:
        if self.last_train_batch is None:
            return

        self._log_scatter_plots(
            self.last_train_batch['label'].to(torch.float32).numpy(),
            self.last_train_batch['pred'].to(torch.float32).numpy(),
            self.means_train, self.stds_train, "train_plot",
        )
        self.last_train_batch = None

    def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx=0):
        # Only the most recent batch of the epoch is retained for plotting.
        self.last_val_batch = {
            'pred': outputs['pred'].detach(),
            'label': outputs['label'].detach()
        }

    def on_validation_epoch_end(self, trainer, pl_module) -> None:
        if self.last_val_batch is None:
            return

        self._log_scatter_plots(
            self.last_val_batch['label'].to(torch.float32).cpu().numpy(),
            self.last_val_batch['pred'].to(torch.float32).cpu().numpy(),
            self.means_val, self.stds_val, self.prefix,
        )
        self.last_val_batch = None

    def on_test_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx=0):
        # Accumulate every test batch so the final plot covers the whole set.
        self.all_test_preds.append(outputs['pred'].detach().to(torch.float32).cpu().numpy())
        self.all_test_labels.append(outputs['label'].detach().to(torch.float32).cpu().numpy())

    def on_test_epoch_end(self, trainer, pl_module):
        self._log_scatter_plots(
            np.concatenate(self.all_test_labels, axis=0),
            np.concatenate(self.all_test_preds, axis=0),
            self.means_test, self.stds_test, "test_plot",
        )
        self.all_test_preds = []
        self.all_test_labels = []


class SkLearn_RegressionCallback(Callback):
    """Probe features with a scikit-learn regressor at each validation epoch.

    Collects (feature, label) pairs from train and validation batches, fits
    a linear/ridge model on the training features at validation-epoch end,
    and logs per-label R2 scores, scatter plots, and model coefficients to
    wandb. Optionally logs a feature-correlation plot of the train features.
    """

    def __init__(self, cfg, mask_labels, plot_correlation=False, model_type='linear'):
        """
        Args:
            cfg: Hydra config; ``cfg.downstream_task`` supplies task type,
                target names, units, and normalization-statistics paths.
            mask_labels: If True, keep only ED-frame labels (ResNet case).
            plot_correlation: Also log a train-feature correlation plot.
            model_type: 'linear' or 'ridge'.

        Raises:
            ValueError: On unknown ``model_type`` or downstream task type.
        """
        super().__init__()

        self.train_features = []
        self.train_labels = []
        self.val_features = []
        self.val_labels = []

        self.plot_correlation = plot_correlation

        if model_type == 'ridge':
            self.model = Ridge()
        elif model_type == 'linear':
            self.model = LinearRegression()
        else:
            # Previously an unknown model type left self.model undefined.
            raise ValueError(f"Unsupported model_type: {model_type!r}")

        self.type = cfg.downstream_task.type

        if self.type == 'vol':
            target_names_full = cfg.downstream_task.target_vol
            units_names_full = cfg.downstream_task.units_vol
        elif self.type == 'area':
            target_names_full = cfg.downstream_task.target_area
            units_names_full = cfg.downstream_task.units_area
        else:
            # Previously an unknown type fell through to a NameError below.
            raise ValueError(f"Unsupported downstream task type: {self.type!r}")

        if mask_labels:
            # RESNET CASE - Filter only ED frame labels.
            self.ids_to_take = filter_ed_labels(target_names_full)
            self.labels_names = [target_names_full[i] for i in self.ids_to_take]
            self.units = [units_names_full[i] for i in self.ids_to_take]
        else:
            self.ids_to_take = list(range(len(target_names_full)))
            self.labels_names = target_names_full
            self.units = units_names_full

        # Per-split denormalization statistics, restricted to the kept labels.
        paths = cfg.downstream_task.paths
        for split in ('train', 'val', 'test'):
            mean_path = getattr(paths, f'mean_{split}_labels_{self.type}')
            std_path = getattr(paths, f'std_{split}_labels_{self.type}')
            setattr(self, f'means_{split}', np.load(mean_path, mmap_mode='r')[self.ids_to_take])
            setattr(self, f'stds_{split}', np.load(std_path, mmap_mode='r')[self.ids_to_take])

    def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx=0):
        self.train_features.append(outputs['features'].detach().cpu().to(torch.float32).numpy())
        self.train_labels.append(outputs['label'].detach().cpu().to(torch.float32).numpy())

    def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx=0):
        self.val_features.append(outputs['features'].detach().cpu().to(torch.float32).numpy())
        self.val_labels.append(outputs['label'].detach().cpu().to(torch.float32).numpy())

    def _log_feature_correlation(self, features):
        """Log a correlation heatmap/clustermap of the training features."""
        feature_names = [f'{i+1}' for i in range(features.shape[1])]
        corr_matrix = pd.DataFrame(features, columns=feature_names).corr()

        plt.figure(figsize=(20, 16))

        # Mask to only show the upper triangle (the matrix is symmetrical).
        mask = np.triu(np.ones_like(corr_matrix, dtype=bool))
        sns.heatmap(corr_matrix, mask=mask, annot=False, cmap='coolwarm', fmt=".2f",
                    linewidths=.5, cbar_kws={"shrink": .8}, vmin=-1, vmax=1)

        # Hierarchical clustering orders the features by similarity.
        # NOTE(review): clustermap opens a *new* figure, so the image logged
        # below is the clustermap, not the heatmap above — confirm intent.
        sns.clustermap(corr_matrix, cmap='coolwarm', linewidths=.5, figsize=(20, 16))

        plt.title('Feature Correlation Matrix')
        plt.xticks(rotation=90)
        plt.yticks(rotation=0)
        plt.tight_layout()
        wandb.log({f"features_correlation/train": wandb.Image(plt.gcf())})
        # Close all figures: plain plt.close() only closes the clustermap and
        # leaks the 20x16 heatmap figure every epoch.
        plt.close('all')

    def _log_validation_results(self, val_labels, predictions):
        """Log per-label R2 scores and pred-vs-actual scatter plots."""
        for idx, label_name in enumerate(self.labels_names):
            r2 = r2_score(val_labels[:, idx], predictions[:, idx])
            wandb.log({f'regression_r2_scores/val_{label_name}': r2})

            plt.figure(figsize=(6, 6))
            plt.scatter(val_labels[:, idx], predictions[:, idx], alpha=0.5)
            plt.xlabel('Actual Values')
            plt.ylabel('Predicted Values')
            plt.title(f'Predictions vs Actual for {label_name}')
            # Red dashed line marks perfect prediction.
            line_min = min(val_labels[:, idx].min(), predictions[:, idx].min())
            line_max = max(val_labels[:, idx].max(), predictions[:, idx].max())
            plt.plot([line_min, line_max], [line_min, line_max], 'r--')
            plt.grid(True)

            wandb.log({f"regression_plots/val_{label_name}": wandb.Image(plt.gcf())})
            plt.close()

    def _log_coefficients(self):
        """Log a bar plot of the fitted model's coefficients per label."""
        coefficients = self.model.coef_
        for idx, label_name in enumerate(self.labels_names):
            plt.figure(figsize=(10, 4))
            plt.bar(range(len(coefficients[idx])), coefficients[idx])
            plt.title(f'Coefficients for {label_name}')
            plt.xlabel('Feature Index')
            plt.ylabel('Coefficient Value')
            plt.grid(True)
            wandb.log({f'regression_coefficients/val_{label_name}': wandb.Image(plt.gcf())})
            plt.close()

    def on_validation_epoch_end(self, trainer, pl_module):
        val_features = np.concatenate(self.val_features)
        val_labels = np.concatenate(self.val_labels)

        train_features_all = np.concatenate(self.train_features)
        train_labels_all = np.concatenate(self.train_labels)

        if self.plot_correlation:
            self._log_feature_correlation(train_features_all)

        self.model.fit(train_features_all, train_labels_all)

        # Make predictions and undo the label normalization.
        predictions = self.model.predict(val_features)
        predictions = predictions * self.stds_val + self.means_val

        # NOTE(review): assumes column 0 is the ES frame index — verify label order.
        predictions[:, 0] = np.round(predictions[:, 0]) # ES FRAME, ONLY INTEGER

        val_labels = val_labels * self.stds_val + self.means_val

        self._log_validation_results(val_labels, predictions)
        self._log_coefficients()

        # Reset accumulators for the next epoch.
        self.val_features = []
        self.val_labels = []
        self.train_features = []
        self.train_labels = []


class WeightDecayAdjustmentCallback(Callback):
    """Raise weight decay to a new value once a given epoch is reached.

    Only parameter groups whose 'name' contains 'wd' (i.e. groups that
    originally carried weight decay) are updated.
    """

    def __init__(self, increase_epoch: int, new_weight_decay: float = 0.05):
        """
        Args:
            increase_epoch: Epoch at which the new weight decay is applied.
            new_weight_decay: Value written into matching param groups.
        """
        super().__init__()
        self.increase_epoch = increase_epoch
        self.new_weight_decay = new_weight_decay

    def on_train_epoch_start(self, trainer, pl_module):
        if trainer.current_epoch != self.increase_epoch:
            return
        for optimizer in trainer.optimizers:
            for param_group in optimizer.param_groups:
                # Optimizer param groups are plain dicts and are not
                # guaranteed to carry a 'name' key; .get avoids a KeyError
                # on unnamed groups (e.g. the PyTorch default group).
                if 'wd' in param_group.get('name', ''):  # group originally had weight decay
                    param_group['weight_decay'] = self.new_weight_decay
                    print(f"Updated weight decay to {self.new_weight_decay} for group {param_group['name']} at epoch {trainer.current_epoch}")


class LogCMRPhenoTypesPyTorch(Metric):
    """Torchmetrics metric that buffers normalized predictions/labels and
    produces denormalized arrays for pred-vs-actual scatter plots.

    Only the most recent ``max_plot_points`` samples are kept for plotting.
    NOTE(review): with ``dist_reduce_fx="cat"`` the truncation happens
    per-process *before* any distributed sync — confirm this is intended
    for multi-GPU runs.
    """

    def __init__(self, cfg: DictConfig, mask_labels: bool, max_plot_points: int = 1000):
        """
        Args:
            cfg: Hydra config; ``cfg.downstream_task`` supplies the task
                type ('vol' or 'area'), target names, units, and the paths
                to the validation-split normalization statistics.
            mask_labels: If True, keep only ED-frame labels.
            max_plot_points: Cap on buffered samples used for plotting.
        """
        super().__init__()
        self.mask_labels = mask_labels
        self.type = cfg.downstream_task.type
        self.max_plot_points = max_plot_points

        # NOTE(review): a type other than 'vol'/'area' leaves these names
        # undefined and raises NameError below — confirm config guarantees.
        if self.type == 'vol':
            target_names_full = cfg.downstream_task.target_vol
            units_names_full = cfg.downstream_task.units_vol
        elif self.type == 'area':
            target_names_full = cfg.downstream_task.target_area
            units_names_full = cfg.downstream_task.units_area

        if self.mask_labels:
            # Keep only ED-frame labels (and their units), by index.
            self.ids_to_take = filter_ed_labels(target_names_full)
            self.labels_names = [target_names_full[i] for i in self.ids_to_take]
            self.units = [units_names_full[i] for i in self.ids_to_take]
        else:
            self.ids_to_take = list(range(len(target_names_full)))
            self.labels_names = target_names_full
            self.units = units_names_full

        # Validation-split denormalization statistics (memory-mapped),
        # restricted to the kept label indices.
        if self.type == 'vol':
            self.means_val = np.load(cfg.downstream_task.paths.mean_val_labels_vol, mmap_mode='r')[self.ids_to_take]
            self.stds_val = np.load(cfg.downstream_task.paths.std_val_labels_vol, mmap_mode='r')[self.ids_to_take]
        elif self.type == 'area':
            self.means_val = np.load(cfg.downstream_task.paths.mean_val_labels_area, mmap_mode='r')[self.ids_to_take]
            self.stds_val = np.load(cfg.downstream_task.paths.std_val_labels_area, mmap_mode='r')[self.ids_to_take]

        # Metric states: concatenated across processes on sync; reset() will
        # restore these defaults.
        self.add_state("predictions", default=torch.empty(0, dtype=torch.float32), dist_reduce_fx="cat")
        self.add_state("labels", default=torch.empty(0, dtype=torch.float32), dist_reduce_fx="cat")
        self.add_state("count", default=torch.tensor(0, dtype=torch.int32), dist_reduce_fx="sum")

    def update(self, preds: torch.Tensor, labels: torch.Tensor):
        """Append a batch of (normalized) predictions and labels.

        Args:
            preds: (batch, num_targets) prediction tensor; only the columns
                in ``ids_to_take`` are kept.
            labels: (batch, num_targets) target tensor; same selection.
        """
        preds = preds[:, self.ids_to_take].detach().cpu()
        labels = labels[:, self.ids_to_take].detach().cpu()

        # Reassigning the state tensors is how torchmetrics accumulates
        # "cat"-reduced states; cat with the empty default relies on
        # torch.cat's legacy handling of 1-D empty tensors.
        self.predictions = torch.cat([self.predictions, preds], dim=0)
        self.labels = torch.cat([self.labels, labels], dim=0)
        self.count += preds.size(0)

        # Bound memory: keep only the most recent max_plot_points samples.
        if self.predictions.size(0) > self.max_plot_points:
            self.predictions = self.predictions[-self.max_plot_points:]
            self.labels = self.labels[-self.max_plot_points:]

    def compute(self):
        """Return (predictions, targets) denormalized with val-split stats."""
        recovered_predictions = (self.predictions.float().numpy() * self.stds_val) + self.means_val
        recovered_targets = (self.labels.float().numpy() * self.stds_val) + self.means_val

        return recovered_predictions, recovered_targets

    def log_plots(self, prefix: str = "val_plot"):
        """Log one pred-vs-actual scatter plot per phenotype to wandb.

        Args:
            prefix: wandb section; "best_val_plot" switches the plot title
                to mark best-model plots.
        """
        recovered_predictions, recovered_targets = self.compute()

        for idx, name in enumerate(self.labels_names):
            plt.figure(figsize=(6, 6))
            plt.scatter(recovered_targets[:, idx], recovered_predictions[:, idx], alpha=0.5)
            plt.xlabel(f'Actual {name} ({self.units[idx]})')
            plt.ylabel(f'Predicted {name} ({self.units[idx]})')
            if prefix == "val_plot":
                plt.title(f'Prediction vs Actual for {name}')
            elif prefix == "best_val_plot":
                plt.title(f'Best Model - Prediction vs Actual for {name}')
            # Identity line: perfect predictions fall on it.
            plt.plot([recovered_targets[:, idx].min(), recovered_targets[:, idx].max()],
                    [recovered_targets[:, idx].min(), recovered_targets[:, idx].max()], 'r--')
            plt.grid(True)

            wandb.log({f"{prefix}/{name}": wandb.Image(plt.gcf())})
            plt.close()

class LogBestR2ScoresCallback(Callback):
    """Track the best value of a monitored metric and re-log R2 scores.

    When the monitored metric improves (higher is better), every
    ``val/R2/*`` entry in ``trainer.callback_metrics`` is mirrored under
    ``best_R2/*`` and, if a plot callback is provided, its validation
    plots are re-emitted under the ``best_val_plot`` prefix.
    """

    def __init__(self, monitor_metric: str = 'val/monitor_metric', metric_instance=None, plot_callback=None):
        """
        Args:
            monitor_metric: Key in ``trainer.callback_metrics`` to monitor.
            metric_instance: Metric object; best-model plots are only
                re-logged when this is provided (it is otherwise unused).
            plot_callback: Callback exposing ``prefix`` and
                ``on_validation_epoch_end``; used for best-model plots.
        """
        super().__init__()
        self.best = -float('inf')
        self.monitor_metric = monitor_metric
        self.metric_instance = metric_instance
        self.plot_callback = plot_callback

    def on_validation_epoch_end(self, trainer, pl_module):
        metrics = trainer.callback_metrics
        current_metric = metrics.get(self.monitor_metric)

        # Skip if the metric is absent or has not improved.
        if current_metric is None or current_metric <= self.best:
            return

        self.best = current_metric
        print(f"New best {self.monitor_metric}: {self.best}")

        # Collect per-label R2 scores logged under the val/R2/ namespace.
        r2_scores = {k.replace('val/R2/', ''): v for k, v in metrics.items() if k.startswith('val/R2/')}

        if trainer.logger:
            r2_log_dict = {f"best_R2/{name}": value for name, value in r2_scores.items()}
            r2_log_dict["best_mean_R2"] = self.best
            trainer.logger.log_metrics(r2_log_dict, step=trainer.global_step)

        if self.plot_callback and self.metric_instance:
            # BUG FIX: restore the original prefix afterwards; previously it
            # stayed "best_val_plot" forever, so every later regular
            # validation plot was logged under the best-plot section.
            saved_prefix = self.plot_callback.prefix
            self.plot_callback.prefix = "best_val_plot"
            try:
                self.plot_callback.on_validation_epoch_end(trainer, pl_module)
            finally:
                self.plot_callback.prefix = saved_prefix

