from omegaconf import DictConfig

import torch
import torch.nn as nn
import numpy as np

from torch.nn import BCEWithLogitsLoss, MSELoss

from ecgcmr.utils.metrics import BinaryClassificationMetrics, RegressionMetrics, SklearnModel
from ecgcmr.utils.misc import filter_ed_labels, Plotter


class LastLayer(nn.Module):
    """Probing head placed on top of an encoder embedding.

    Maps an embedding of size ``in_size`` to ``out_size`` task outputs,
    either through a single linear layer (default) or a small bottleneck
    MLP (``use_mlp == "MLP"``).

    Args:
        in_size: dimensionality of the incoming embedding.
        out_size: number of task outputs (logits or regression targets).
        init_type: weight-init scheme for linear layers:
            ``'normal' | 'xavier' | 'kaiming' | 'orthogonal'``.
            Any other value keeps PyTorch's default initialisation.
        mask_labels: if True, the input is expected to carry a frame axis
            at dim 1 and only the first frame (ED) is fed to the head.
        use_mlp: ``'MLP'`` selects the 3-layer MLP head; any other value
            selects the single linear layer.
    """

    def __init__(
            self,
            in_size: int,
            out_size: int,
            init_type: str = 'xavier',
            mask_labels: bool = False,
            use_mlp: str = 'LinearLayer',
            ) -> None:
        super().__init__()

        self.init_type = init_type
        self.mask_labels = mask_labels
        self.use_mlp = use_mlp

        if self.use_mlp == "MLP":
            # Bottleneck MLP: in_size -> in_size/4 -> in_size/16 -> out_size.
            # NOTE: `//` already returns int, so no int(...) wrapper is needed.
            self.linear_probe_layer = nn.Sequential(
                nn.Linear(in_features=in_size, out_features=in_size // 4, bias=True),
                nn.ReLU(),
                nn.Linear(in_features=in_size // 4, out_features=in_size // 16, bias=True),
                nn.ReLU(),
                nn.Linear(in_features=in_size // 16, out_features=out_size, bias=True),
            )
        else:
            self.linear_probe_layer = nn.Linear(in_features=in_size, out_features=out_size, bias=True)

        self.apply(self.init_weights)

    def init_weights(self, m: nn.Module, init_gain: float = 0.02) -> None:
        """Initialise linear-layer weights according to ``self.init_type``.

        Unknown init types silently keep PyTorch's default init (kept for
        backward compatibility). Biases are always zeroed.
        """
        if isinstance(m, nn.Linear):
            # nn.init.* functions operate in no_grad; passing the Parameter
            # directly is equivalent to (and preferred over) `.data` access.
            if self.init_type == 'normal':
                nn.init.normal_(m.weight, 0, init_gain)
            elif self.init_type == 'xavier':
                nn.init.xavier_normal_(m.weight, gain=init_gain)
            elif self.init_type == 'kaiming':
                nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
            elif self.init_type == 'orthogonal':
                nn.init.orthogonal_(m.weight, gain=init_gain)
            # nn.Linear always exposes `bias`; it is None when bias=False.
            if m.bias is not None:
                nn.init.constant_(m.bias, 0.0)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Run the head; optionally keep only the first (ED) frame first."""
        if self.mask_labels:
            x = x[:, 0]  # TAKE ONLY ED FRAME

        return self.linear_probe_layer(x)


class LastLayerConfiguration:
    """Factory wiring up the downstream-task head, loss, metrics and plotter.

    Reads the task definition (``classification``/``regression``) and label
    set (``vol``/``area``) from the omegaconf config and builds the matching
    ``LastLayer``, criterion, sklearn baseline, metrics object and plotter.

    Args:
        cfg: hydra/omegaconf config with a ``downstream_task`` section.
        encoded_dim: dimensionality of the encoder embedding fed to the head.
        mask_labels: if True, restrict targets to ED-frame labels only.
        training_scheme: tag forwarded to the metrics (e.g. 'LP').
        use_mlp: head type tag forwarded to ``LastLayer`` ('MLP' or linear).

    Raises:
        ValueError: if ``cfg.downstream_task.type`` is neither 'vol' nor 'area'.
    """

    def __init__(self, cfg: DictConfig, encoded_dim: int, mask_labels: bool, training_scheme: str = 'LP', use_mlp: str = 'LinearLayer'):
        self.cfg = cfg

        self.encoded_dim = encoded_dim
        self.mask_labels = mask_labels

        self.task = cfg.downstream_task.task
        self.type = cfg.downstream_task.type

        self.training_scheme = training_scheme
        self.use_mlp = use_mlp

        if self.type == 'vol':
            target_names_full = cfg.downstream_task.target_vol
            units_names_full = cfg.downstream_task.units_vol
        elif self.type == 'area':
            target_names_full = cfg.downstream_task.target_area
            units_names_full = cfg.downstream_task.units_area
        else:
            # Previously fell through and crashed later with a NameError on
            # target_names_full; fail fast with a clear message instead.
            raise ValueError(f"Unknown downstream task type: {self.type!r} (expected 'vol' or 'area')")

        if self.mask_labels:
            # Keep only the ED-frame labels and their matching units.
            self.ids_to_take = filter_ed_labels(target_names_full)
            self.labels_names = [target_names_full[i] for i in self.ids_to_take]
            self.units = [units_names_full[i] for i in self.ids_to_take]
        else:
            self.ids_to_take = list(range(len(target_names_full)))
            self.labels_names = target_names_full
            self.units = units_names_full

        self.out_size = len(self.ids_to_take)

        # Per-label normalisation statistics, restricted to the selected labels.
        # BUGFIX: the 'area' branch used to store these under means_val/stds_val
        # while create_sklearn_model / create_metrics read means_train/stds_train,
        # which raised AttributeError for 'area' regression. Both names are now
        # populated; means_val/stds_val are kept for backward compatibility.
        if self.type == 'vol':
            self.means_train = np.load(cfg.downstream_task.paths.mean_train_labels_vol, mmap_mode='r')[self.ids_to_take]
            self.stds_train = np.load(cfg.downstream_task.paths.std_train_labels_vol, mmap_mode='r')[self.ids_to_take]
        elif self.type == 'area':
            self.means_val = np.load(cfg.downstream_task.paths.mean_val_labels_area, mmap_mode='r')[self.ids_to_take]
            self.stds_val = np.load(cfg.downstream_task.paths.std_val_labels_area, mmap_mode='r')[self.ids_to_take]
            self.means_train = self.means_val
            self.stds_train = self.stds_val

        # BUGFIX: create_criterion referenced self.pos_weight, which was never
        # assigned anywhere -> AttributeError for the classification task.
        # Falls back to 1.0 (no positive re-weighting) when absent from the
        # config. NOTE(review): confirm the intended config key name.
        self.pos_weight = cfg.downstream_task.get('pos_weight', 1.0)

    def create_last_layer(self):
        """Build the ``LastLayer`` head matching the configured task."""
        if self.task == 'classification':
            # NOTE(review): out_size - 1 presumably drops a non-target column
            # for binary classification — confirm against the label layout.
            return LastLayer(in_size=self.encoded_dim,
                             out_size=self.out_size - 1,
                             mask_labels=self.mask_labels,
                             use_mlp=self.use_mlp)
        elif self.task == 'regression':
            return LastLayer(in_size=self.encoded_dim,
                             out_size=self.out_size,
                             mask_labels=self.mask_labels,
                             use_mlp=self.use_mlp)

    def create_criterion(self):
        """Return the loss: weighted BCE for classification, MSE for regression."""
        if self.task == 'classification':
            return BCEWithLogitsLoss(pos_weight=torch.tensor([self.pos_weight]))

        elif self.task == 'regression':
            return MSELoss()

    def create_sklearn_model(self):
        """Return the sklearn baseline (regression only; None otherwise)."""
        if self.task == 'regression':
            return SklearnModel(labels_names=self.labels_names,
                                means_train=self.means_train,
                                stds_train=self.stds_train,
                                units=self.units)

    def create_metrics(self):
        """Return the metrics object matching the configured task."""
        if self.task == 'classification':
            return BinaryClassificationMetrics()

        elif self.task == 'regression':
            return RegressionMetrics(labels_names=self.labels_names,
                                     means_train=self.means_train,
                                     stds_train=self.stds_train,
                                     ids_to_take=self.ids_to_take,
                                     use_mlp=self.use_mlp,
                                     training_scheme=self.training_scheme)

    def create_plotter(self):
        """Return the plotter (regression only; None otherwise)."""
        if self.task == 'regression':
            return Plotter(
                units=self.units,
                labels_names=self.labels_names
            )