# Neural network

import os
import time

import tqdm
import torch
import torchvision
import pandas as pd
import polars as pl
import numpy as np 

from PIL import Image
from sklearn import metrics, model_selection

from config import Config


# Reproducibility: seed every RNG in use (torch CPU, torch CUDA, numpy)
# from the single configured random state.
torch.manual_seed(Config.random_state)
torch.cuda.manual_seed(Config.random_state)
np.random.seed(Config.random_state)
# Let cuDNN auto-tune the fastest kernels for the (fixed) input sizes.
# NOTE(review): benchmark mode trades exact run-to-run determinism for speed.
torch.backends.cudnn.benchmark = True

# Register tqdm with pandas so `.progress_apply` shows a progress bar.
tqdm.tqdm.pandas()


class ECGDataset(torch.utils.data.Dataset):
    """Dataset of plotted ECG images read from ``Config.dir_plotted_ecgs``.

    Each item is ``(image_tensor, label)`` for one row of ``df``; when
    ``return_image_path`` is True the image file path is appended as a
    third element (used by evaluation to track per-file predictions).
    """

    # ImageNet normalization statistics — matches the pretrained backbones.
    _MEAN = [0.485, 0.456, 0.406]
    _STD = [0.229, 0.224, 0.225]

    def __init__(self, df, return_image_path=False):
        # df: rows with at least 'ECGID' and 'LABEL' columns (pandas-style
        # .iloc access is used in __getitem__).
        self.df = df
        self.return_image_path = return_image_path
        # Transform pipeline is built once, lazily, on first item access.
        # Previously the whole Compose was reconstructed on EVERY
        # __getitem__ call — pure loop-invariant overhead.
        self._transforms = None

    def __len__(self):
        return self.df.shape[0]

    def __getitem__(self, index):
        record = self.df.iloc[index]
        filepath = os.path.join(Config.dir_plotted_ecgs, record['ECGID'] + '.png')
        image = Image.open(filepath).convert('RGB')

        # Build the (invariant) transform pipeline on first use only.
        if self._transforms is None:
            self._transforms = torchvision.transforms.Compose([
                torchvision.transforms.Resize((448, 448)),
                torchvision.transforms.ToTensor(),
                torchvision.transforms.Normalize(mean=self._MEAN, std=self._STD),
            ])
        image_tensor = self._transforms(image)

        # CrossEntropyLoss needs integer class indices; BCEWithLogitsLoss
        # needs float targets.
        label = torch.tensor(record['LABEL'], dtype=torch.long if Config.ce_loss else torch.float)

        if self.return_image_path:
            return image_tensor, label, filepath

        return image_tensor, label


class Finetune:
    """End-to-end fine-tuning pipeline for one outcome.

    Loads a labelled parquet dataset, builds train / test / external-validation
    dataloaders, fine-tunes a torchvision backbone, and persists per-epoch
    metrics, prediction probabilities, and model checkpoints under
    ``Results/<outcome>/<model_identifier>/``.
    """

    def __init__(self, outcome, dataset):
        """
        outcome: name of the prediction target (names the results directory).
        dataset: path to a parquet file; expected columns include at least
                 ECGID, LABEL, MRN, SITENAME (see create_dataloaders).
        """
        self.outcome = outcome
        self.dataset = pl.read_parquet(dataset)

        # Keep only rows whose plotted ECG image actually exists on disk.
        existing_files = [
            filename.replace('.png', '')
            for filename in os.listdir(Config.dir_plotted_ecgs)]
        self.dataset = self.dataset\
            .filter(pl.col('ECGID').is_in(existing_files))

        print('Total samples', self.dataset.shape)
        print('Prevalence', self.dataset['LABEL'].mean())

    @staticmethod
    def eval_model(dataloader, model):
        """Evaluate ``model`` on ``dataloader``.

        The dataloader must yield (images, labels, paths) triples, i.e. come
        from an ECGDataset built with return_image_path=True.

        Returns (auroc, aupr, mean_loss, df_pred) where df_pred has columns
        FILES / TRUE / PRED and mean_loss is the per-sample mean loss.
        """
        all_preds = []
        all_labels = []
        all_files = []

        # Mirror the training objective: CE or BCE depending on config.
        criterion = torch.nn.CrossEntropyLoss() if Config.ce_loss else torch.nn.BCEWithLogitsLoss()

        # BUGFIX: accumulate across the whole loop. Previously the accumulator
        # was reset to 0 inside the loop body, so the "epoch" loss actually
        # reflected only the final batch.
        testing_epoch_loss = 0.0
        n_samples = 0

        model.eval()
        for images, labels, paths in tqdm.tqdm(dataloader):
            all_files.extend(paths)

            with torch.cuda.amp.autocast():
                with torch.no_grad():
                    images = images.cuda()
                    labels = labels.cuda()

                    outputs = model(images)

                    # squeeze(-1) removes only the BCE logit dimension; a bare
                    # squeeze() would collapse a size-1 batch to a scalar and
                    # break the loss's shape expectations.
                    testing_loss = criterion(outputs.squeeze(-1), labels)
                    # Weight by batch size so the final mean is per-sample.
                    testing_epoch_loss += testing_loss.item() * outputs.shape[0]
                    n_samples += outputs.shape[0]

                    # Normalize logits to a positive-class probability so the
                    # rest of the pipeline is loss-agnostic.
                    if Config.ce_loss:
                        normalized_preds = torch.softmax(outputs, dim=1)[:, 1].detach().cpu().numpy()
                    else:
                        normalized_preds = torch.sigmoid(outputs).detach().cpu().numpy()

                    all_preds.extend(normalized_preds)
                    all_labels.extend(labels.cpu().numpy())

        # BUGFIX: divide by the number of samples seen (the accumulator is
        # sample-weighted), not by the number of batches.
        testing_loss = testing_epoch_loss / max(n_samples, 1)
        df_pred = pd.DataFrame({'FILES': all_files, 'TRUE': all_labels, 'PRED': all_preds})

        df_pred['TRUE'] = df_pred['TRUE'].astype(int)
        if not Config.ce_loss:
            # BCE predictions arrive as length-1 arrays; unwrap to scalars.
            df_pred['PRED'] = df_pred['PRED'].str[0]

        auroc = metrics.roc_auc_score(df_pred['TRUE'], df_pred['PRED'])
        precision, recall, _ = metrics.precision_recall_curve(df_pred['TRUE'], df_pred['PRED'])
        aupr = metrics.auc(recall, precision)

        return auroc, aupr, testing_loss, df_pred

    def gaping_maw(self, train_dataloader, test_dataloader, ext_val_dataloader, model, model_identifier):
        """Fine-tune ``model``; evaluate, log, and checkpoint after each epoch.

        The model is expected to already have the correct classification head
        (see create_model). External validation runs only when
        Config.perform_ext_val is set; its metrics are recorded as None
        otherwise.
        """
        print('Model identifier:', model_identifier)

        # Housekeeping
        result_dir = os.path.join('Results', self.outcome, model_identifier)
        os.makedirs(result_dir, exist_ok=True)

        # Multi-GPU wrapper around the supplied backbone.
        model = torch.nn.DataParallel(model)
        model = model.cuda()

        # AdamW for the transformer, plain Adam for the CNN families.
        if model_identifier == 'vit':
            optimizer = torch.optim.AdamW(model.parameters(), lr=3e-4)
        else:
            optimizer = torch.optim.Adam(model.parameters(), lr=3e-4)

        # CE or BCE loss
        criterion = torch.nn.CrossEntropyLoss() if Config.ce_loss else torch.nn.BCEWithLogitsLoss()

        scaler = torch.cuda.amp.GradScaler()

        # One-cycle schedule sized for ft_epochs; the 5 extra epochs below
        # continue at whatever LR the schedule ended on.
        scheduler = torch.optim.lr_scheduler.OneCycleLR(
            optimizer, max_lr=1e-3, epochs=Config.ft_epochs,
            steps_per_epoch=len(train_dataloader))

        # Keep track of performance across epochs.
        all_results = []

        for epoch in range(Config.ft_epochs + 5):
            epoch_loss = 0.0
            epoch_samples = 0

            model.train()
            for images, labels in tqdm.tqdm(train_dataloader):

                with torch.cuda.amp.autocast():
                    images = images.cuda()
                    labels = labels.cuda()

                    # Same as optimizer.zero_grad(set_to_none=True).
                    for param in model.parameters():
                        param.grad = None

                    # Forward pass
                    outputs = model(images)

                    # squeeze(-1): drop only the BCE logit dimension (safe for
                    # size-1 batches, no-op for the CE (N, 2) output).
                    loss = criterion(outputs.squeeze(-1), labels)
                    epoch_loss += loss.item() * outputs.shape[0]
                    epoch_samples += outputs.shape[0]

                    scaler.scale(loss).backward()
                    scaler.step(optimizer)
                    scaler.update()

                    # The schedule only covers ft_epochs; training continues
                    # for 5 more epochs at the final LR.
                    if epoch < Config.ft_epochs:
                        scheduler.step()

            # BUGFIX: mean per-sample loss — the accumulator is sample-weighted,
            # so divide by samples seen, not by the batch count.
            training_loss = epoch_loss / max(epoch_samples, 1)

            # Current learning rate from the scheduler.
            lr = scheduler.get_last_lr()[0]

            training_samples = len(train_dataloader.dataset)
            testing_samples = len(test_dataloader.dataset)

            # BUGFIX: default all external-validation values so the results
            # row, prints, and pickling below do not raise NameError when
            # perform_ext_val is disabled.
            ext_val_auroc = ext_val_aupr = ext_val_testing_loss = None
            ext_val_samples = None
            ext_val_df_pred = None
            if Config.perform_ext_val:
                ext_val_samples = len(ext_val_dataloader.dataset)

            # Evaluate model on testing and external validation sets.
            auroc, aupr, testing_loss, df_pred = Finetune.eval_model(test_dataloader, model)
            if Config.perform_ext_val:
                ext_val_auroc, ext_val_aupr, ext_val_testing_loss, ext_val_df_pred = Finetune.eval_model(ext_val_dataloader, model)

            # Results for this model for this epoch.
            all_results.append([
                model_identifier, epoch, lr,
                auroc, aupr, ext_val_auroc, ext_val_aupr,
                training_loss, testing_loss, ext_val_testing_loss,
                training_samples, testing_samples, ext_val_samples])
            df_results = pd.DataFrame(all_results, columns=[
                'Model', 'Epoch', 'LR',
                'AUROC', 'AUPR', 'ExtValAUROC', 'ExtValAUPR',
                'TrainingLoss', 'TestingLoss', 'ExtValTestingLoss',
                'TrainingSamples', 'TestingSamples', 'ExtValSamples'])

            print('Model', model_identifier, 'Epoch:', epoch, 'LR:', lr)
            print(
                'AUROC:', auroc, 'AUPR:', aupr, 'Ext val AUROC:', ext_val_auroc, 'Ext val AUPR:', ext_val_aupr,
                'Training loss:', training_loss, 'Testing loss:', testing_loss, 'Ext val loss:', ext_val_testing_loss)

            # Cumulative results table (overwritten each epoch).
            outfile_name = os.path.join(result_dir, 'results.pickle')
            df_results.to_pickle(outfile_name)

            # Prediction probabilities for this model for this epoch.
            outfile_name = os.path.join(result_dir, f'prob_{epoch}.pickle')
            df_pred.to_pickle(outfile_name)

            if ext_val_df_pred is not None:
                outfile_name_ext = os.path.join(result_dir, f'prob_ext_{epoch}.pickle')
                ext_val_df_pred.to_pickle(outfile_name_ext)

            # Save the model at the end of each epoch.
            model_out_dir = os.path.join(result_dir, 'models')
            os.makedirs(model_out_dir, exist_ok=True)
            outfile_name = os.path.join(model_out_dir, f'model_{epoch}.pt')
            torch.save(model.state_dict(), outfile_name)

    def create_model(self, identifier):
        """Instantiate a torchvision backbone and attach a binary head.

        ``identifier`` must name a torchvision.models constructor; its weights
        enum name comes from Config.models[identifier]['weights'].
        """
        weight_identifier = Config.models[identifier]['weights']
        weights = None
        if weight_identifier is not None:
            # BUGFIX/security: attribute lookup instead of eval() — same
            # resolution without executing arbitrary config strings.
            weights = getattr(torchvision.models, weight_identifier).IMAGENET1K_V1
        model = getattr(torchvision.models, identifier)(weights=weights)

        # Variant suffix after '-' is not part of the architecture family name.
        identifier = identifier.split('-')[0]

        # CE loss needs two logits; BCE needs one.
        num_classes = 2 if Config.ce_loss else 1

        # Replace the classification head; layer names and widths differ per
        # architecture family.
        if identifier == 'efficientnet_b4':
            model.classifier[1] = torch.nn.Linear(1792, num_classes)
        if identifier == 'densenet201':
            model.classifier = torch.nn.Linear(1920, num_classes)
        if identifier.startswith('resnet'):
            model.fc = torch.nn.Linear(2048, num_classes)
        if identifier.startswith('convnext'):
            model.classifier[2] = torch.nn.Linear(1536, num_classes)

        return model

    def create_dataloaders(self, model_identifier=None):
        """Split the dataset and build train / test / ext-val dataloaders.

        One facility is held out entirely for external validation; the rest
        is split 90/10 grouped by patient (MRN) so no patient appears in both
        train and test.
        """
        splitter = model_selection.GroupShuffleSplit(
            n_splits=1, random_state=Config.random_state,
            test_size=0.1)

        # Hold out one entire facility for external validation.
        ext_val_facility = "ST.LUKE'S-ROOSEVELT HOSPITAL (S)"
        df_ext_val = self.dataset\
            .filter(pl.col('SITENAME') == ext_val_facility)\
            .to_pandas()
        self.dataset = self.dataset\
            .filter(pl.col('SITENAME') != ext_val_facility)\
            .to_pandas()

        # Grouping by MRN keeps all of a patient's ECGs on one side of the split.
        splits = splitter.split(
            self.dataset['ECGID'],
            self.dataset['LABEL'],
            groups=self.dataset['MRN'])

        # GroupShuffleSplit yields lazily; take the single configured split.
        train, test = next(splits)
        df_train = self.dataset.iloc[train]
        df_test = self.dataset.iloc[test]

        # Datasets: eval sets return image paths for per-file predictions.
        train_dataset = ECGDataset(df_train)
        test_dataset = ECGDataset(df_test, return_image_path=True)

        train_dataloader = torch.utils.data.DataLoader(
            train_dataset,
            batch_size=Config.batch_size,
            shuffle=True,
            num_workers=Config.n_workers,
            drop_last=True,
            pin_memory=True)

        test_dataloader = torch.utils.data.DataLoader(
            test_dataset,
            batch_size=Config.batch_size,
            shuffle=False,
            num_workers=Config.n_workers,
            pin_memory=True)

        ext_val_dataset = ECGDataset(df_ext_val, return_image_path=True)
        ext_val_dataloader = torch.utils.data.DataLoader(
            ext_val_dataset,
            batch_size=Config.batch_size,
            shuffle=False,
            num_workers=Config.n_workers,
            pin_memory=True)

        return train_dataloader, test_dataloader, ext_val_dataloader

    def hammer_time(self):
        """Entry point: build model + dataloaders and run the training loop."""
        with open('Errors.log', 'a') as outfile:
            outfile.write(f'{time.ctime()} New run\n')

        model_identifier = 'convnext_large'
        try:
            model = self.create_model(model_identifier)
            train_dataloader, test_dataloader, ext_val_dataloader = \
                self.create_dataloaders(model_identifier)
            self.gaping_maw(
                train_dataloader, test_dataloader, ext_val_dataloader,
                model, model_identifier)

        # NOTE(review): only IndexError is logged; any other exception still
        # propagates — confirm this narrow catch is intentional.
        except IndexError as e:
            with open('Errors.log', 'a') as outfile:
                outfile.write(f'{time.ctime()} {self.outcome} {model_identifier} {e}\n')
