# Generate explainability plots for each model

import os
import multiprocessing

import tqdm
import torch
import torchvision

import pandas as pd
import polars as pl
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

from PIL import Image
from scipy.spatial import distance
from sklearn import model_selection, metrics

try:
    from pytorch_grad_cam import FullGrad
    from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
except ImportError:
    print('GradCAM not installed')

from config import Config
from copd_nn import ECGDataset

torch.manual_seed(Config.random_state)
torch.cuda.manual_seed(Config.random_state)
np.random.seed(Config.random_state)
torch.backends.cudnn.benchmark = True

plt.rcParams["axes.edgecolor"] = "0.15"
plt.rcParams["axes.linewidth"] = 1
plt.rcParams["axes.formatter.use_locale"] = True
plt.rcParams['font.serif'] = ['Helvetica [adobe]']

tqdm.tqdm.pandas()

TO_EXPLAIN = 50

# Set matplotlib to no GUI
# plt.switch_backend('agg')


class Explainability:
    """Generate and consolidate FullGrad (GradCAM-family) explainability
    plots for the best checkpoint of a trained ConvNeXt classifier.

    Expects the project layout used elsewhere in this repo:
    ``Results/<outcome>/<model_identifier>/{results.pickle, models/, prob_*.pickle}``.
    """

    def __init__(self, outcome, dataset):
        """
        Args:
            outcome: outcome name, used to locate ``Results/<outcome>/...``.
            dataset: path to a parquet file with (at least) the columns
                ECGID, LABEL, MRN and SITENAME.
        """
        self.outcome = outcome
        self.dataset = pl.read_parquet(dataset)

    def get_model(self, model_identifier, load_model=True):
        """Return ``(model, best_epoch)`` for the best-AUROC epoch.

        Args:
            model_identifier: subdirectory name under ``Results/<outcome>/``.
            load_model: when False, skip model construction and return
                ``(None, best_epoch)``.
        """
        # Pick the epoch with the highest AUROC on the tracked metric
        df_results = pd.read_pickle(f'Results/{self.outcome}/{model_identifier}/results.pickle')
        # idxmax() returns an index *label*, so select with .loc (not .iloc,
        # which expects a position and only coincides on a RangeIndex)
        best_epoch = df_results.loc[df_results['AUROC'].idxmax(), 'Epoch']

        if not load_model:
            return None, best_epoch

        dict_location = f'Results/{self.outcome}/{model_identifier}/models/model_{best_epoch}.pt'
        state_dict = torch.load(dict_location)
        # Strip the 'module.' prefix added when the model was trained
        # wrapped in (Distributed)DataParallel
        state_dict = {k.replace('module.', ''): v for k, v in state_dict.items()}

        # Rebuild the architecture; 1536 is the ConvNeXt-Large feature width
        model = torchvision.models.convnext_large(weights=None)
        model.classifier[2] = torch.nn.Linear(1536, 2)  # Special to this - 2 classes

        # Load state dict and freeze in inference mode
        model.load_state_dict(state_dict)
        model.eval()

        return model, best_epoch

    def get_dataset(self, model_identifier, best_epoch):
        """Build the test split and return the TO_EXPLAIN most confident
        true-positive rows as a frame with columns ECGID and LABEL."""
        splitter = model_selection.GroupShuffleSplit(
            n_splits=1, random_state=Config.random_state,
            test_size=0.1)

        # One facility is held out entirely for external validation and
        # excluded from the train/test split below
        ext_val_facility = "ST.LUKE'S-ROOSEVELT HOSPITAL (S)"
        self.dataset = self.dataset\
            .filter(pl.col('SITENAME') != ext_val_facility)\
            .to_pandas()

        # NOTE Make sure all dataframes correspond to this scheme
        # Group by MRN so no patient appears in both train and test
        splits = splitter.split(
            self.dataset['ECGID'],
            self.dataset['LABEL'],
            groups=self.dataset['MRN'])

        # GroupShuffleSplit yields a lazy generator; take the single split
        _, test = next(splits)
        df_test = self.dataset.iloc[test]

        # Predicted probabilities for the chosen epoch; derive ECGID from
        # the basename (without extension) of each prediction file path
        df_pred = pd.read_pickle(f'Results/{self.outcome}/{model_identifier}/prob_{best_epoch}.pickle')
        df_pred['ECGID'] = df_pred['FILES'].str.split('/').str[-1].str.split('.').str[0]

        # Inner join predictions onto the test rows, then keep the
        # TO_EXPLAIN highest-probability positives.
        df_test = df_test.merge(df_pred, on='ECGID', how='inner')
        highest_positive_idx = df_test.query('LABEL == 1')['PRED'].nlargest(TO_EXPLAIN).index
        # nlargest(...).index holds *labels*, so select with .loc
        # (.iloc would misselect on any non-default index)
        df_test = df_test.loc[highest_positive_idx][['ECGID', 'LABEL']]

        return df_test

    def create_dataloader(self, df):
        """Wrap *df* in an ECGDataset and return a one-image-per-batch loader."""
        test_dataset = ECGDataset(df, return_image_path=True)
        # batch_size=1: CAMs are generated and rendered one ECG at a time
        test_dataloader = torch.utils.data.DataLoader(
            test_dataset,
            batch_size=1,
            shuffle=False,
            num_workers=40,
            pin_memory=True)

        return test_dataloader

    def generate_explanations(self, model, dataloader, model_identifier):
        """Render a FullGrad heatmap overlaid on each ECG image and save it
        to ``Explainability/<outcome>/<ecgid>.png`` (skipping existing files)."""
        cam = FullGrad(model=model, target_layers=[], use_cuda=True)
        # Always explain the positive class (index 1)
        targets = [ClassifierOutputTarget(1)]

        # Colormap for the attribution heatmap
        cmap = 'jet'

        # Iterate through the dataloader
        for i, (image, _label, image_path) in enumerate(dataloader):

            # Skip images that were already rendered on a previous run
            image_identifier = os.path.basename(image_path[0]).split('.')[0]
            outfile = f'Explainability/{self.outcome}/{image_identifier}.png'
            if os.path.exists(outfile):
                print(f'File exists: {outfile}')
                continue
            else:
                print(i, outfile)

            # Generate explanation (drop the batch dimension)
            grayscale_cam = cam(input_tensor=image, targets=targets)
            grayscale_cam = grayscale_cam[0, :]

            # Init the figure
            plt.figure(figsize=(10, 10))

            # Get the original image
            original_image = Image.open(image_path[0])

            # Keep only the (black) ECG trace: make white pixels transparent
            # so the heatmap below shows through, and force everything else
            # to opaque black. NOTE(review): the tuple compares assume an
            # RGBA image; on RGB input neither branch matches — confirm the
            # source PNGs carry an alpha channel.
            pixdata = original_image.load()
            width, height = original_image.size
            for y in range(height):
                for x in range(width):

                    # Make all white pixels transparent
                    if pixdata[x, y] == (255, 255, 255, 255):
                        pixdata[x, y] = (255, 255, 255, 0)

                    # Make all non transparent pixels black
                    if pixdata[x, y] != (255, 255, 255, 0):
                        pixdata[x, y] = (0, 0, 0, 255)

            # Resize the heatmap to the size of the original image using PIL
            heatmap = Image.fromarray(grayscale_cam)
            heatmap = heatmap.resize((original_image.size[0], original_image.size[1]))
            heatmap = np.array(heatmap)

            # Heatmap underneath, semi-transparent
            plt.imshow(heatmap, cmap=cmap, alpha=.7)

            # ECG trace on top (white areas are now transparent)
            plt.imshow(original_image)

            # Remove the axis
            plt.axis('off')

            # Fill the whole canvas with the image before saving
            plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0)
            plt.savefig(outfile, dpi=300)
            plt.close()

    def consolidate(self, outcome):
        """Assemble the curated explainability PNGs into one 2x2 labelled
        figure saved as ``Figures/Explainability_<outcome>.png``."""
        # Redo order of model_paths according to the Config.models dictionary
        img_dir = './CuratedExplainability/'
        # sorted(): os.listdir order is arbitrary, and the a/b/c/d panel
        # labels must be deterministic across runs
        image_paths = sorted(
            os.path.join(img_dir, i) for i in os.listdir(img_dir) if i.endswith('.png')
        )

        # Create the figure
        fig, axes = plt.subplots(2, 2, figsize=(18, 20))
        axes = axes.ravel()

        # Panel labels a-d
        prefixes = list('abcd')

        for count, image_path in enumerate(image_paths):
            current_ax = axes[count]

            try:
                image = Image.open(image_path)
                current_ax.imshow(image)
            except AttributeError:
                # NOTE(review): best-effort bail-out kept from the original;
                # unclear which call raises AttributeError here — confirm
                return

            # Remove all ticks and labels
            current_ax.set_xticks([])
            current_ax.set_yticks([])
            current_ax.set_xlabel('')
            current_ax.set_ylabel('')

            # Panel letter in the top-left corner
            current_ax.text(
                -0.0,
                1.02,
                prefixes[count] + '  ',
                transform=current_ax.transAxes,
                size=28, weight='bold')

            # Suptitle drawn once, anchored to the first panel
            if count == 0:
                current_ax.text(
                    -0.0,
                    1.15,
                    'COPD Diagnosis: Explainability plots',
                    transform=current_ax.transAxes,
                    size=54, weight='bold')

        plt.subplots_adjust(
            top=0.9,
            bottom=0.02,
            left=0.02,
            right=.98,
            hspace=0.05,
            wspace=0.05
        )

        plt.savefig(f'Figures/Explainability_{outcome}.png', dpi=200)
        plt.close()

    def hammer_time(self):
        """Entry point: (optionally) generate per-ECG explanations, then
        consolidate the curated ones into the final figure."""
        model_identifier = 'convnext_large'

        # Iterate through models and generate explanations for an ECG
        # print('Starting explanations')
        # os.makedirs(f'Explainability/{self.outcome}', exist_ok=True)
        # for model_identifier in Config.models:
        #     model, best_epoch = self.get_model(model_identifier, load_model=True)
        #     df_test = self.get_dataset(model_identifier, best_epoch)
        #     test_dataloader = self.create_dataloader(df_test)
        #     self.generate_explanations(model, test_dataloader, model_identifier)

        print('Starting consolidation')
        # consolidate() writes into Figures/ — create that directory
        # (previously this created 'Results', which is never written to here)
        os.makedirs('Figures', exist_ok=True)
        self.consolidate(self.outcome)

