import os
from typing import Dict, Tuple, Callable, List
from tqdm import tqdm
from contextlib import nullcontext
import numpy as np
import scipy.stats
import pandas as pd
from sklearn.metrics import roc_curve, auc
from captum.attr import IntegratedGradients
from torch.autograd import profiler
import torch
from torch.utils.data import Dataset, DataLoader


from DeepPPG.config import PredConfig
from DeepPPG.utils import landmark_metrics
from DeepPPG.utils import loss
from DeepPPG.model.base import to_tensor
from DeepPPG.training.trainer import BaseTrainer



class Seq2OneTrainer(BaseTrainer):
    """Trainer for sequence-to-one prediction models.

    Extends :class:`BaseTrainer` with an epoch loop that optionally consumes
    per-sample metadata (``config.do_meta``) and supports models with a
    t-distribution / center-loss head (``config.if_t_dist``).
    """

    def __init__(self, model: torch.nn.Module, config: PredConfig):
        """Set up the base trainer and resolve the configured loss criterion.

        :param model: The model to train / evaluate.
        :param config: Prediction config; ``config.criterion["criterion"]``
            must name a criterion class defined in ``DeepPPG.utils.loss``.
        """
        super().__init__(model, config)
        self.criterion = getattr(loss, config.criterion["criterion"])(config)

    def _optimizer_step(self, batch_loss: torch.Tensor) -> None:
        """Apply one optimizer update for ``batch_loss``.

        Shared by both epoch paths (with/without metadata); previously this
        sequence was duplicated verbatim in each loop.
        """
        self.optimizer.zero_grad()
        # NOTE(review): retain_graph=True is normally unnecessary for a single
        # per-batch backward and keeps the autograd graph alive (memory growth);
        # kept for behavioral compatibility — confirm no model variant relies
        # on it, then drop.
        batch_loss.backward(retain_graph=True)
        torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.grad_bound)
        self.optimizer.step()

    def forward_epoch(self, train: bool, dataloader: DataLoader, record_pred: bool, **kwargs):
        """Run one full pass over ``dataloader``.

        :param train: If True, run in training mode with gradient updates;
            otherwise run in eval mode under ``torch.no_grad()``.
        :param dataloader: Yields ``(x, meta, y, _)`` batches when
            ``config.do_meta`` is set, else ``(x, y, _)``.
        :param record_pred: If True, collect per-sample predictions and labels.
        :return: ``([preds, labels], mean_loss)`` where ``preds``/``labels``
            are numpy arrays (empty when ``record_pred`` is False) and
            ``mean_loss`` is the batch-averaged loss.
        """
        profile_ctx = (
            profiler.profile(use_cuda="cuda" in self.config.device)
            if self.config.print_time
            else nullcontext()
        )
        with profile_ctx as prof:
            if train:
                self.model.train()
            else:
                self.model.eval()

            total_loss = 0.0
            pred_list: List[float] = []
            label_list: List[float] = []

            if self.config.verbose:
                dataloader = tqdm(dataloader, desc=f"{'Train epoch' if train else 'Eval'}: {self._epoch}")

            grad_ctx = nullcontext() if train else torch.no_grad()
            with grad_ctx:
                for batch in dataloader:
                    if self.config.do_meta:
                        x, meta, y, _ = batch
                    else:
                        x, y, _ = batch
                        meta = None

                    # to_tensor is handed the target device directly; the extra
                    # ``x.to(device)`` round-trip in the original was redundant
                    # (assumes to_tensor honors its device argument — confirm).
                    x_cuda = to_tensor(x, "float32", self.config.device)
                    # Labels stay on CPU so they can be logged without copy-back.
                    y = to_tensor(y, "float", "cpu")

                    if self.config.do_meta:
                        meta_cuda = meta.to(self.config.device)
                        if self.config.if_t_dist:
                            y_cuda = y.to(self.config.device)
                            y_pred, feat, center_loss, means, ra, rl = self.model(x_cuda, meta_cuda, y=y_cuda)
                            # Labels arrive one-hot on this path; collapse to
                            # class indices for the index-based criterion.
                            y = torch.argmax(y, dim=-1)
                            batch_loss = self.criterion(y_pred, to_tensor(y, "int64", self.config.device)) + center_loss
                            y_pred = y_pred[:, -1]  # record only the last-class score
                        else:
                            # NOTE(review): meta is available but not fed to the
                            # model on this path — confirm this is intended.
                            y_pred = self.model(x_cuda)
                            batch_loss = self.criterion(y_pred, to_tensor(y, "float", self.config.device))
                    else:
                        if self.config.if_t_dist:
                            # NOTE(review): unlike the meta path, y is passed to
                            # the model on CPU here — confirm this is intended.
                            y_pred, feat, center_loss, means, ra, rl = self.model(x_cuda, y=y)
                            batch_loss = self.criterion(y_pred, to_tensor(y, "float", self.config.device)) + center_loss
                        else:
                            y_pred = self.model(x_cuda)
                            batch_loss = self.criterion(y_pred, to_tensor(y, "float", self.config.device))

                    # (renamed from ``loss``, which shadowed the imported
                    # DeepPPG.utils.loss module)
                    total_loss += batch_loss.item()

                    if record_pred:
                        pred_list.extend(y_pred.squeeze().to('cpu').tolist())
                        label_list.extend(y.squeeze().tolist())

                    if train:
                        self._optimizer_step(batch_loss)

        # Optionally dump profiler results after the epoch completes.
        if self.config.print_time:
            if "cuda" in self.config.device and self.config.dataloader_workers == 0:
                print(prof.key_averages().table(sort_by="cuda_memory_usage"))
            else:
                print(prof.key_averages().table(sort_by="cpu_time_total"))

        return [np.array(pred_list), np.array(label_list)], total_loss / len(dataloader)

    def test_all(self, model: torch.nn.Module, dataloader: DataLoader, prefix: str) -> Dict:
        """
        Evaluate ``self.model`` on the provided dataset and return metrics.

        :param model: Unused — evaluation runs on ``self.model``; parameter
            kept for interface compatibility with callers.
        :param dataloader: DataLoader object for the dataset.
        :param prefix: Prefix for the metric names (e.g. ``"val"``, ``"test"``).
        :return: Dictionary of metrics, including ``f"{prefix}_loss"``.
        """
        print(f"Evaluation on {prefix} set ...")
        record, avg_loss = self.forward_epoch(train=False, dataloader=dataloader, record_pred=True)
        # avg_loss is already a scalar mean; the original's np.mean() wrapper
        # was a no-op on a scalar.
        metric_dict = {f'{prefix}_loss': avg_loss}

        # compute metrics
        preds, labels = record
        classification_scores = landmark_metrics.evaluate_landmark_metrics(preds, labels)
        classification_scores = {f'{prefix}_{k}': v for k, v in classification_scores.items()}
        return {**metric_dict, **classification_scores}

    def visu_attribution(self, *args, **kwargs):
        """Attribution visualization is not implemented for this trainer."""
        raise NotImplementedError

