import torch
import wandb

from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.preprocessing import StandardScaler

from torchmetrics import Metric
from torchmetrics.functional.classification import binary_auroc, binary_average_precision, binary_precision, binary_recall, binary_f1_score, binary_confusion_matrix
from torchmetrics.functional import r2_score, mean_absolute_error, mean_squared_error
from torchmetrics.utilities import dim_zero_cat 

import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np


class CustomAccuracy(Metric):
    """Top-k accuracy: fraction of samples whose true label appears among the
    model's ``topk`` highest-scoring predictions.

    State is accumulated across ``update`` calls and reduced with ``sum``
    across processes, so ``compute`` yields the global accuracy.
    """

    # BUGFIX: these were bare annotations (`is_differentiable: False`), which
    # assign nothing — torchmetrics expects actual class attributes.
    is_differentiable = False
    higher_is_better = True

    def __init__(self, topk: int = 5, dist_sync_on_step: bool = False):
        super().__init__(dist_sync_on_step=dist_sync_on_step)

        self.topk = topk
        # Running count of samples whose true label is in the top-k predictions.
        self.add_state("correct_sum", default=torch.tensor(0.), dist_reduce_fx="sum")
        # Running count of all samples seen.
        self.add_state("total", default=torch.tensor(0), dist_reduce_fx="sum")

    def update(self, logits: torch.Tensor, labels: torch.Tensor):
        """Accumulate top-k hits for one batch.

        Args:
            logits: ``(batch, num_classes)`` class scores.
            labels: ``(batch,)`` integer class indices.
        """
        with torch.no_grad():
            batch_size = labels.size(0)

            # Indices of the k largest logits per sample: (batch, topk).
            _, pred = logits.topk(self.topk, 1, True, True)
            labels = labels.view(-1, 1)

            # Hit if the true label matches any of the top-k predictions.
            correct = pred.eq(labels.expand_as(pred))

            self.correct_sum += correct.any(dim=1).float().sum()
            self.total += batch_size

    def compute(self):
        """Return the accumulated top-k accuracy as a scalar tensor."""
        return self.correct_sum / self.total


class BinaryClassificationMetrics(Metric):
    """Accumulates binary predictions/targets across batches and derives
    precision, recall, F1, ROC-AUC, PR-AUC, and the confusion matrix in a
    single ``compute`` call."""

    def __init__(self, dist_sync_on_step: bool = False):
        super().__init__(dist_sync_on_step=dist_sync_on_step)

        # Per-batch tensor lists; concatenated lazily in compute().
        for state_name in ("predictions", "labels"):
            self.add_state(state_name, default=[], dist_reduce_fx="cat")

    def update(self, preds: torch.Tensor, labels: torch.Tensor):
        """Stash one batch of predictions and ground-truth labels."""
        self.predictions.append(preds)
        self.labels.append(labels)

    def compute(self):
        """Return a dict of binary-classification metrics over all batches."""
        preds = dim_zero_cat(self.predictions)
        labels = dim_zero_cat(self.labels)

        return {
            "precision": binary_precision(preds=preds, target=labels),
            "recall": binary_recall(preds=preds, target=labels),
            "f1": binary_f1_score(preds=preds, target=labels),
            "auc_roc": binary_auroc(preds=preds, target=labels),
            "auc_pr": binary_average_precision(preds=preds, target=labels),
            "confusion_matrix": binary_confusion_matrix(preds=preds, target=labels),
        }


def plot_confusion_matrix(cm, class_names):
    """Render a confusion matrix as a heatmap annotated with counts and
    row-normalized percentages.

    Args:
        cm: (n, n) array of counts; rows = true class, cols = predicted class.
        class_names: tick labels for both axes, length n.

    Returns:
        The matplotlib Figure (caller is responsible for closing it).
    """
    # Row-normalize; guard all-zero rows so 0/0 doesn't produce NaN/warnings.
    row_sums = cm.sum(axis=1, keepdims=True).astype('float')
    cm_percentage = np.divide(cm.astype('float'), row_sums,
                              out=np.zeros_like(cm, dtype=float),
                              where=row_sums != 0)

    # dtype=object avoids the fixed-width unicode dtype that
    # np.empty_like(cm).astype(str) yields, which silently truncates
    # annotation strings longer than the dtype width.
    annotations = np.empty(cm.shape, dtype=object)
    nrows, ncols = cm.shape
    for i in range(nrows):
        for j in range(ncols):
            c = cm[i, j]
            p = cm_percentage[i, j] * 100
            annotations[i, j] = f"{c}\n({p:.1f}%)"

    fig, ax = plt.subplots(figsize=(8, 6))
    # fmt='' because the annotations are pre-formatted strings, not numbers.
    sns.heatmap(cm, annot=annotations, fmt='', ax=ax, cmap="Blues",
                cbar_kws={'label': 'Count'}, square=True, linewidths=0.5)

    ax.set_xlabel('Predicted labels')
    ax.set_ylabel('True labels')
    ax.set_title('Confusion Matrix')
    ax.xaxis.set_ticklabels(class_names)
    ax.yaxis.set_ticklabels(class_names, rotation=360)

    plt.tight_layout()

    return fig


class RegressionMetrics(Metric):
    """Accumulates normalized regression predictions/targets across batches,
    de-normalizes them with training-set statistics, and reports per-target
    metrics plus mean metrics over RV/LV target groups.
    """

    is_differentiable = False
    # Direction of improvement per supported metric.
    higher_is_better = {
        'R2': True,
        'MAE': False,
    }

    def __init__(self,
                 labels_names: list,
                 metrics: list = None,
                 additional_metrics: list = None,
                 dist_sync_on_step: bool = False,
                 compute_on_cpu: bool = True,
                 means_train: np.ndarray = None,
                 stds_train: np.ndarray = None,
                 units: list = None,
                 ids_to_take: list = None,
                 training_scheme: str = 'LP',
                 use_mlp: str = 'LinearLayer'
                 ):
        """
        Args:
            labels_names: one name per kept target column (ids_to_take order).
            metrics: subset of {'R2', 'MAE', 'MSE'}; defaults to ['R2', 'MAE'].
            additional_metrics: extra diagnostics; defaults to ['MAE_MeanGuess'].
            means_train / stds_train: per-target normalization statistics used
                to de-normalize predictions and labels. Must not be None.
            units: per-target unit strings (stored, unused here).
            ids_to_take: column indices to keep from preds/labels in update().
            training_scheme / use_mlp: strings used only to build metric keys.
        """
        super().__init__(dist_sync_on_step=dist_sync_on_step, compute_on_cpu=compute_on_cpu)

        self.labels_names = labels_names
        # BUGFIX: `None` sentinels replace mutable default arguments, which
        # would otherwise be shared across all instances.
        self.metrics = ['R2', 'MAE'] if metrics is None else metrics
        self.additional_metrics = ['MAE_MeanGuess'] if additional_metrics is None else additional_metrics

        self.means_train = torch.tensor(means_train, dtype=torch.float32)
        self.stds_train = torch.tensor(stds_train, dtype=torch.float32)

        self.units = units
        self.ids_to_take = ids_to_take

        self.use_mlp = use_mlp
        self.training_scheme = training_scheme

        self.add_state("predictions", default=[], dist_reduce_fx="cat")
        self.add_state("labels", default=[], dist_reduce_fx="cat")

    def update(self, preds: torch.Tensor, labels: torch.Tensor):
        """Store one batch, keeping only the columns listed in ids_to_take."""
        filtered_preds = preds[:, self.ids_to_take].detach().cpu()
        filtered_labels = labels[:, self.ids_to_take].detach().cpu()

        self.predictions.append(filtered_preds)
        self.labels.append(filtered_labels)

    def compute(self):
        """Compute all configured metrics over the accumulated batches.

        Returns:
            (metrics_results, preds, labels) where metrics_results maps key
            strings to floats and preds/labels are the de-normalized tensors.

        Raises:
            ValueError: if update() was never called, or a configured metric
                name is unsupported.
        """
        if len(self.predictions) == 0:
            raise ValueError("No validation data available for computing metrics.")

        preds = dim_zero_cat(self.predictions)
        labels = dim_zero_cat(self.labels)

        # De-normalize back to physical units with the training statistics.
        preds = preds.float() * self.stds_train + self.means_train
        labels = labels.float() * self.stds_train + self.means_train

        metrics_results = {}
        num_labels = labels.shape[1]
        # Shared prefix for all logged metric keys (hoisted out of the loops).
        key_prefix = f"val_{self.training_scheme}_{self.use_mlp}"

        for label_idx in range(num_labels):
            preds_phenotype = preds[:, label_idx]
            labels_phenotype = labels[:, label_idx]
            label_name = self.labels_names[label_idx]

            for metric in self.metrics:
                if metric == 'R2':
                    score = r2_score(preds=preds_phenotype, target=labels_phenotype)
                elif metric == 'MAE':
                    score = mean_absolute_error(preds=preds_phenotype, target=labels_phenotype)
                elif metric == 'MSE':
                    score = mean_squared_error(preds=preds_phenotype, target=labels_phenotype)
                else:
                    raise ValueError(f"Unsupported metric: {metric}")

                metrics_results[f"{key_prefix}/{metric}.{label_name}"] = score.item()

            # Baseline: MAE of always predicting the mean of the true labels.
            if 'MAE_MeanGuess' in self.additional_metrics:
                mean_guess = torch.mean(labels_phenotype)
                mae_mean_guess = mean_absolute_error(preds=torch.full_like(labels_phenotype, mean_guess), target=labels_phenotype)
                metrics_results[f"val_mean_guess/MAE_MeanGuess.{label_name}"] = mae_mean_guess.item()

        # Aggregate per-metric means over RV / LV target groups (by name).
        rv_indices = [i for i, label in enumerate(self.labels_names) if 'RV' in label]
        lv_indices = [i for i, label in enumerate(self.labels_names) if 'LV' in label]

        for metric in self.metrics:
            if rv_indices:
                mean_rv = torch.mean(torch.tensor([metrics_results[f"{key_prefix}/{metric}.{self.labels_names[i]}"] for i in rv_indices]))
                metrics_results[f'{key_prefix}/Mean_RV_{metric}'] = mean_rv.item()
            if lv_indices:
                mean_lv = torch.mean(torch.tensor([metrics_results[f"{key_prefix}/{metric}.{self.labels_names[i]}"] for i in lv_indices]))
                metrics_results[f'{key_prefix}/Mean_LV_{metric}'] = mean_lv.item()

        # Reset accumulators so the next epoch starts fresh.
        self.predictions.clear()
        self.labels.clear()

        return metrics_results, preds, labels


class SklearnModel:
    """Scikit-learn linear-probe baseline over pre-extracted features.

    Batches are accumulated per split via ``update``; ``compute`` then fits
    the model on the train split, evaluates on the val split in de-normalized
    units, tracks the best mean R2 / mean MAE seen so far, and logs metrics
    and scatter plots to wandb.
    """

    def __init__(self,
                 labels_names: list,
                 model_type='linear',
                 means_train: np.ndarray = None,
                 stds_train: np.ndarray = None,
                 units: list = None,
                 ):
        """
        Args:
            labels_names: one name per regression target, in the column order
                of the label arrays passed to ``update``.
            model_type: 'linear', 'ridge' or 'lasso'.
            means_train / stds_train: per-target normalization statistics used
                to map normalized values back to physical units.
            units: per-target unit strings (stored, unused here).

        Raises:
            ValueError: for an unsupported ``model_type``.
        """
        self.labels_names = labels_names
        self.means_train = means_train
        self.stds_train = stds_train
        self.units = units

        if model_type == 'ridge':
            self.model = Ridge()
        elif model_type == 'linear':
            self.model = LinearRegression()
        elif model_type == 'lasso':
            self.model = Lasso(alpha=0.1)
        else:
            raise ValueError(f"Unsupported model_type: {model_type}")

        # Standardizer fit on train features only (transform-only on val,
        # so no leakage of val statistics into the fit).
        self.scaler_features = StandardScaler()

        # Per-batch accumulators, emptied at the end of each compute().
        self.train_features = []
        self.train_labels = []
        self.val_features = []
        self.val_labels = []

        # Best-so-far tracking for the two model-selection criteria.
        self.best_mean_r2 = None
        self.best_mean_mae = None
        self.best_metrics_r2 = None
        self.best_metrics_mae = None

    def update(self, feats: np.ndarray, labels: np.ndarray, stage: str):
        """Accumulate one batch of features/labels for the given split.

        Raises:
            ValueError: if ``stage`` is neither 'train' nor 'val'.
        """
        if stage == 'train':
            self.train_features.append(feats)
            self.train_labels.append(labels)
        elif stage == 'val':
            self.val_features.append(feats)
            self.val_labels.append(labels)
        else:
            raise ValueError(f"Invalid stage: {stage}. Must be 'train' or 'val'.")

    def compute(self):
        """Fit on the accumulated train split and evaluate on the val split.

        Logs per-target and aggregate R2/MAE to wandb whenever a new best
        mean R2 or mean MAE is reached, then clears the accumulators.

        Returns:
            True if this evaluation achieved a new best mean R2; a new best
            mean MAE alone does not set the flag (see note below).
        """
        is_best = False

        train_features = np.concatenate(self.train_features)
        train_labels = np.concatenate(self.train_labels)

        val_features = np.concatenate(self.val_features)
        val_labels = np.concatenate(self.val_labels)

        # Fit the scaler on train only; apply the same transform to val.
        train_features_scaled = self.scaler_features.fit_transform(train_features)
        val_features_scaled = self.scaler_features.transform(val_features)

        self.model.fit(train_features_scaled, train_labels)

        predictions_normalized = self.model.predict(val_features_scaled)

        # De-normalize predictions and targets back to physical units.
        predictions_denorm = predictions_normalized * self.stds_train + self.means_train
        val_labels_denorm = val_labels * self.stds_train + self.means_train

        metrics_results = {}
        r2_values = []
        mae_values = []

        # Per-target R2/MAE, computed via torchmetrics on CPU tensors.
        for idx, label_name in enumerate(self.labels_names):
            y_true = torch.from_numpy(val_labels_denorm[:, idx])
            y_pred = torch.from_numpy(predictions_denorm[:, idx])

            r2 = r2_score(preds=y_pred, target=y_true)
            mae = mean_absolute_error(preds=y_pred, target=y_true)

            metrics_results[f'R2_{label_name}'] = r2.item()
            metrics_results[f'MAE_{label_name}'] = mae.item()

            r2_values.append(r2.item())
            mae_values.append(mae.item())

        mean_r2 = np.mean(r2_values)
        mean_mae = np.mean(mae_values)

        metrics_results['mean_R2'] = mean_r2
        metrics_results['mean_MAE'] = mean_mae

        # Aggregate means over RV / LV target groups (selected by name).
        rv_indices = [i for i, label in enumerate(self.labels_names) if 'RV' in label]
        lv_indices = [i for i, label in enumerate(self.labels_names) if 'LV' in label]

        for metric_name in ['R2', 'MAE']:
            if rv_indices:
                mean_rv = np.mean([metrics_results[f"{metric_name}_{self.labels_names[i]}"] for i in rv_indices])
                metrics_results[f'Mean_RV_{metric_name}'] = mean_rv
            if lv_indices:
                mean_lv = np.mean([metrics_results[f"{metric_name}_{self.labels_names[i]}"] for i in lv_indices])
                metrics_results[f'Mean_LV_{metric_name}'] = mean_lv

        # New best by mean R2: snapshot metrics, log, and plot.
        if self.best_mean_r2 is None or mean_r2 > self.best_mean_r2:
            self.best_mean_r2 = mean_r2
            self.best_metrics_r2 = metrics_results.copy()
            is_best = True

            for name, value in self.best_metrics_r2.items():
                wandb.log({f'sklearn_best_mean_R2/{name}': value})

            self.plot_predictions(val_labels_denorm, predictions_denorm, stage='best_mean_R2')

        # New best by mean MAE (lower is better): snapshot, log, and plot.
        # NOTE(review): this branch does not set is_best — only mean-R2
        # improvements do; confirm this asymmetry is intended.
        if self.best_mean_mae is None or mean_mae < self.best_mean_mae:
            self.best_mean_mae = mean_mae
            self.best_metrics_mae = metrics_results.copy()

            for name, value in self.best_metrics_mae.items():
                wandb.log({f'sklearn_best_mean_MAE/{name}': value})
            self.plot_predictions(val_labels_denorm, predictions_denorm, stage='best_mean_MAE')

        # Reset accumulators for the next evaluation round.
        self.train_features = []
        self.train_labels = []
        self.val_features = []
        self.val_labels = []

        return is_best

    def plot_predictions(self, y_true, y_pred, stage):
        """Log one predicted-vs-actual scatter plot per target to wandb."""
        for idx, label_name in enumerate(self.labels_names):
            plt.figure(figsize=(6, 6))
            plt.scatter(y_true[:, idx], y_pred[:, idx], alpha=0.5)
            plt.xlabel('Actual Values')
            plt.ylabel('Predicted Values')
            plt.title(f'Predictions vs Actual for {label_name}')

            line_min = min(y_true[:, idx].min(), y_pred[:, idx].min())
            line_max = max(y_true[:, idx].max(), y_pred[:, idx].max())

            plt.plot([line_min, line_max], [line_min, line_max], 'r--')  # Red line for perfect prediction
            plt.grid(True)

            wandb.log({f"sklearn_{stage}_plots/{label_name}": wandb.Image(plt.gcf())})
            plt.close()