import numpy as np
import torch
from typing import List, Optional, Callable
from pytorch_grad_cam.base_cam import BaseCAM
from pytorch_grad_cam.activations_and_gradients import TokenAAG
import ttach as tta


class TokenCAM(BaseCAM):
    """CAM for Vision-Transformer-style models.

    Patch saliency is computed from the cosine similarity between the
    class token and each patch token of the target layer's activations,
    rather than from gradients.

    NOTE(review): ``uses_gradients`` defaults to True for BaseCAM
    compatibility, but gradients are never consumed here — the backward
    pass is intentionally skipped in ``forward``.
    """

    def __init__(
        self,
        model,
        target_layers,
        reshape_transform: Optional[Callable] = None,
        compute_input_gradient: bool = False,
        uses_gradients: bool = True,
        tta_transforms: Optional[tta.Compose] = None,
    ):
        """Set up the model hooks and (optional) TTA pipeline.

        Args:
            model: the model to explain; switched to eval mode.
            target_layers: layers whose token activations are captured.
            reshape_transform: optional callable applied to activations.
            compute_input_gradient: if True, enable gradients on the input.
            uses_gradients: kept for BaseCAM interface compatibility.
            tta_transforms: test-time-augmentation pipeline; a default
                flip + multiply compose is used when None.
        """
        self.model = model.eval()
        self.target_layers = target_layers

        # Use the same device as the model.
        self.device = next(self.model.parameters()).device
        self.reshape_transform = reshape_transform
        self.compute_input_gradient = compute_input_gradient
        self.uses_gradients = uses_gradients
        if tta_transforms is None:
            self.tta_transforms = tta.Compose(
                [
                    tta.HorizontalFlip(),
                    tta.Multiply(factors=[0.9, 1, 1.1]),
                ]
            )
        else:
            self.tta_transforms = tta_transforms

        self.activations_and_grads = TokenAAG(self.model, target_layers)

    def get_cam_weights(
        self,
        input_tensor,
        target_layer,
        target_category,
        activations: np.ndarray,
        grads: np.ndarray,
    ) -> np.ndarray:
        """Calculate the weights for different patches through the class token.

        Args:
            activations: [B, 1 + H*W, D] token activations from the target
                layer, where index 0 along dim 1 is the cls token
                (e.g. [B, 197, 768] for a 14x14 patch grid).
            grads: unused; kept for the BaseCAM interface.

        Returns:
            [B, side, side] min-max-normalized patch weights, where
            ``side = sqrt(H*W)`` is inferred from the token count.

        Raises:
            ValueError: if ``activations`` is not 3-dimensional, or if the
                patch-token count is not a perfect square.
        """
        if len(activations.shape) != 3:
            raise ValueError(
                "TokenCAM for 3D images is not implemented yet."
                "Invalid shape."
                "Shape of activations should be 3 (2D image)."
            )

        cls_token = torch.from_numpy(activations[:, 0, :])
        tokens = torch.from_numpy(activations[:, 1:, :])
        # Cosine similarity between the cls token and every patch token.
        weights = torch.nn.functional.cosine_similarity(
            cls_token.unsqueeze(1), tokens, dim=-1
        )
        # Infer the square spatial grid from the number of patch tokens
        # instead of hard-coding it, so any square patch grid
        # (14x14, 64x64, ...) works.
        num_tokens = tokens.shape[1]
        side = int(round(num_tokens ** 0.5))
        if side * side != num_tokens:
            raise ValueError(
                f"Cannot arrange {num_tokens} patch tokens into a square grid."
            )
        weights = weights.reshape(-1, side, side)
        # Min-max normalize over the whole batch; clamp the denominator so
        # a constant weight map does not produce NaNs.
        w_min, w_max = weights.min(), weights.max()
        weights = (weights - w_min) / (w_max - w_min).clamp_min(1e-12)
        return weights.cpu().numpy()

    def get_cam_image(
        self,
        input_tensor: torch.Tensor,
        target_layer: torch.nn.Module,
        targets: List[torch.nn.Module],
        activations: np.ndarray,
        grads: np.ndarray,
        eigen_smooth: bool = False,
    ) -> np.ndarray:
        """Return the saliency map for one target layer.

        The token weights already form the CAM; a singleton channel axis
        is inserted and summed out so the return shape matches the
        BaseCAM contract ([B, H, W]).
        """
        weights = self.get_cam_weights(
            input_tensor, target_layer, targets, activations, grads
        )
        # 2D conv [B, L, D]
        if len(activations.shape) != 3:
            raise ValueError(
                f"Invalid activation shape. Get {activations.shape}."
            )

        # [B, H, W] -> [B, 1, H, W]; summing the singleton axis returns
        # the weights unchanged.
        weighted_activations = weights[:, None, ...]
        cam = weighted_activations.sum(axis=1)
        return cam  # type: ignore

    def forward(
        self,
        input_tensor: torch.Tensor,
        targets: Optional[List[torch.nn.Module]],
        eigen_smooth: bool = False,
    ) -> np.ndarray:
        """Compute and aggregate the CAM over all target layers.

        Args:
            input_tensor: batch of inputs; moved to the model's device.
            targets: class indices to explain; when None the argmax class
                of each model output is used.
            eigen_smooth: forwarded to ``compute_cam_per_layer``.
        """
        from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget

        input_tensor = input_tensor.to(self.device)

        if self.compute_input_gradient:
            # torch.autograd.Variable is deprecated; create a fresh leaf
            # tensor that tracks gradients instead.
            input_tensor = input_tensor.clone().detach().requires_grad_(True)

        self.outputs = outputs = self.activations_and_grads(input_tensor)

        if targets is None:
            # Explain the predicted class of each sample.
            target_categories = np.argmax(
                outputs.detach().cpu().numpy(), axis=-1
            )
            tgt = [
                ClassifierOutputTarget(category)
                for category in target_categories
            ]
        else:
            tgt = [ClassifierOutputTarget(target) for target in targets]

        # No backward pass: TokenCAM's weights are similarity-based, not
        # gradient-based, so gradients are deliberately not computed here.

        # In most of the saliency attribution papers, the saliency is
        # computed with a single target layer.
        # Commonly it is the last convolutional layer.
        # Here we support passing a list with multiple target layers.
        # It will compute the saliency image for every image,
        # and then aggregate them (with a default mean aggregation).
        # This gives you more flexibility in case you just want to
        # use all conv layers for example, all Batchnorm layers,
        # or something else.
        cam_per_layer = self.compute_cam_per_layer(input_tensor, tgt, eigen_smooth)  # type: ignore
        return self.aggregate_multi_layers(cam_per_layer)


# def compute_cam_per_layer(
#     self,
#     input_tensor: torch.Tensor,
#     targets: List[torch.nn.Module],
#     eigen_smooth: bool,
# ) -> np.ndarray:
#     activations_list = [
#         a.cpu().data.numpy() for a in self.activations_and_grads.activations
#     ]
#     grads_list = [
#         g.cpu().data.numpy() for g in self.activations_and_grads.gradients
#     ]
#     target_size = self.get_target_width_height(input_tensor)

#     cam_per_target_layer = []
#     # Loop over the saliency image from every layer
#     for i in range(len(self.target_layers)):
#         target_layer = self.target_layers[i]
#         # TODO comment the None
#         # layer_activations = None
#         # layer_grads = None
#         if i < len(activations_list):
#             layer_activations = activations_list[i]
#         if i < len(grads_list):
#             layer_grads = grads_list[i]

#         cam = self.get_cam_image(
#             input_tensor,
#             target_layer,
#             targets,
#             layer_activations,
#             layer_grads,
#             eigen_smooth,
#         )
#         cam = np.maximum(cam, 0)
#         scaled = scale_cam_image(cam, target_size)
#         cam_per_target_layer.append(scaled[:, None, :])

#     return np.array(cam_per_target_layer)
