import sys
import os
import cv2  # OpenCV for image processing
from typing import Union, Optional
import numpy as np  # NumPy for numerical operations
import argparse
import timm
import matplotlib.pyplot as plt
import torch
from torch.utils.data import DataLoader

from main import setup_parser, merge_config, load_json
from utils.data.data_manager import DataManager
from learners import get_learner
from trainer import _set_device, _set_random

from pytorch_grad_cam import GradCAM
from visual.token_cam import TokenCAM
from pytorch_grad_cam.utils.image import show_cam_on_image


# Registry of supported CAM back-ends, keyed by the CLI "--method" value.
__CAM = dict(
    gradcam=GradCAM,
    tokencam=TokenCAM,
)

# Batch size for the test DataLoader used to fetch sample images.
_batch_size = 128


class ModelWrapper(torch.nn.Module):
    """Adapt a model whose forward returns a dict so it yields raw logits.

    CAM libraries expect ``model(x)`` to return a plain tensor, while the
    wrapped model returns ``{"logits": ...}``.  All parameters are unfrozen
    on wrap so gradient-based CAM methods can backpropagate through them.
    """

    def __init__(self, model):
        super().__init__()
        # Unfreeze before registering so every parameter can receive grads.
        self._set_trainable(model, True)
        self.original_model = model

    def _set_trainable(self, model, requires_grad):
        # Toggle requires_grad on every parameter of ``model``.
        for param in model.parameters():
            param.requires_grad = requires_grad

    def forward(self, x):
        # Unwrap the dict output down to the logits tensor.
        return self.original_model(x)["logits"]

    def __getattr__(self, name):
        # Only invoked when regular lookup fails; forward unknown names to
        # the wrapped model.  "original_model" itself must be resolved by
        # nn.Module's own __getattr__ (it lives in _modules), otherwise the
        # hasattr() probe below would recurse forever.
        if name != "original_model" and hasattr(self.original_model, name):
            return getattr(self.original_model, name)
        return super().__getattr__(name)


class NamespaceDict(argparse.Namespace, dict):
    """A Namespace/dict hybrid keeping keys and attributes in sync.

    Every mapping entry is mirrored as an instance attribute, so values can
    be read and written through either ``obj["key"]`` or ``obj.key``.
    """

    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        # Mirror every initial entry as an instance attribute.
        for key in self:
            super().__setattr__(key, dict.__getitem__(self, key))

    def __getitem__(self, key):
        # Plain dict-style read.
        return dict.__getitem__(self, key)

    def __setitem__(self, key, value):
        # dict-style write, mirrored onto the attribute.
        dict.__setitem__(self, key, value)
        super().__setattr__(key, value)

    def __delitem__(self, key):
        # dict-style delete removes the mirrored attribute as well.
        dict.__delitem__(self, key)
        super().__delattr__(key)

    def __getattr__(self, key):
        # Reached only when normal attribute lookup fails; fall back to
        # the mapping so stale mirrors still resolve.
        try:
            return dict.__getitem__(self, key)
        except KeyError:
            raise AttributeError(
                f"'NamespaceDict' object has no attribute '{key}'"
            )

    def __setattr__(self, key, value):
        # Attribute write is mirrored into the mapping.
        dict.__setitem__(self, key, value)
        super().__setattr__(key, value)

    def __delattr__(self, key):
        # Attribute delete; a missing mapping entry is tolerated.
        try:
            dict.__delitem__(self, key)
        except KeyError:
            pass
        super().__delattr__(key)


def reshape_transform(tensor, height=14, width=14):
    """Reshape ViT token activations (B, 1 + H*W, C) to a (B, C, H, W) map.

    The first token (CLS) is dropped; the remaining patch tokens are laid
    out on the ``height`` x ``width`` grid and channels are moved in front
    of the spatial dimensions, as CAM post-processing expects.
    """
    patch_tokens = tensor[:, 1:, :]
    spatial = patch_tokens.reshape(
        tensor.size(0), height, width, tensor.size(2)
    )
    # (B, H, W, C) -> (B, C, H, W)
    return spatial.permute(0, 3, 1, 2)


def get_args(args=None):
    """Parse the CAM-visualization command-line options.

    Args:
        args: Optional sequence of argument strings. Defaults to
            ``sys.argv[1:]``; a caller-supplied sequence is copied, never
            mutated.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--aug_smooth",
        action="store_true",
        help="Apply test time augmentation to smooth the CAM",
    )
    parser.add_argument(
        "--eigen_smooth",
        action="store_true",
        # Fixed typos ("principle componenet") and the missing space at the
        # implicit string-concatenation boundary.
        help="Reduce noise by taking the first principal component "
        "of cam_weights*activations",
    )

    parser.add_argument(
        "--model",
        type=str,
        default="mine",
        help="choose timm or custom method",
    )

    parser.add_argument(
        "--ckp_path",
        type=str,
        default="mine",
        # Was a copy-paste of --model's help text.
        help="path to the checkpoint loaded for the custom model",
    )
    parser.add_argument(
        "--method",
        type=str,
        default="gradcam",
        help="choose cam methods",
    )

    parser.add_argument(
        "--cam_visual",
        action="store_true",
        help="whether to visualize the cam",
    )

    if args is None:
        # args default to the system args
        args = sys.argv[1:]
    else:
        # make sure that args are mutable (and the caller's list untouched)
        args = list(args)

    return parser.parse_args(args)


def visual(
    imgs: torch.Tensor,
    targets: torch.Tensor,
    models: dict,
    methods: list,
    ckp_path: Optional[str],
    device: Union[str, torch.device],
    need_target: bool = False,
):
    """Save a side-by-side CAM comparison figure for each image in ``imgs``.

    For every image, a 1x3 figure (original image | timm ViT CAM | custom
    model CAM) is written to ``visual/cam_images``.

    Args:
        imgs: Batch of input images; each is unsqueezed to a single-image
            batch before being fed to the model.
        targets: Class labels aligned with ``imgs``; used for the filename
            and (optionally) for a target-conditioned CAM.
        models: Mapping of model name -> placeholder. The key "timm"
            selects a pretrained timm ViT; any other key loads ``ckp_path``
            into the learner's network. Exactly one non-"timm" key is
            supported (two raise NotImplementedError below).
        methods: CAM method names (keys of ``__CAM``); only ``methods[0]``
            is used, two methods raise NotImplementedError.
        ckp_path: Checkpoint path loaded for the non-timm model.
        device: Device the per-image input tensor is moved to.
        need_target: If True, additionally compute a CAM conditioned on the
            ground-truth target; plotting it is unimplemented and raises
            NotImplementedError.

    NOTE(review): reads the module-level globals ``learner`` and ``args``
    that are set in the ``__main__`` block, so this function cannot be
    imported and used standalone — confirm before reusing elsewhere.
    """
    # One result slot per model, holding the latest image's CAM images.
    cam_dict = {}
    for key in models.keys():
        cam_dict[key] = {"target": None, "predict": None}

    for idx in range(len(imgs)):
        # Banner announcing which image is being visualized.
        print("\n" + "=" * 50)
        print(f"Visualizing {idx}th image")
        print("=" * 50 + "\n")

        # Add a leading batch dimension for the model / CAM call.
        img = imgs[idx].unsqueeze(0).to(device)
        # [1] -> [1, 1]
        target = targets[idx].unsqueeze(0).to(device)
        # convert input_img to rgb (CHW tensor -> HWC numpy array)
        # NOTE(review): assumes pixel values are already in [0, 1] as
        # required by show_cam_on_image — confirm against the data pipeline.
        rgb_img = img.cpu().numpy().squeeze().transpose(1, 2, 0)
        save_pth = f"idx{idx}_{target.item()}.jpg"

        # NOTE(review): models are re-created (and the checkpoint re-loaded)
        # for every image; hoisting this out of the image loop would be
        # much faster.
        for model_name, model in models.items():
            if model_name == "timm":
                model = timm.create_model(
                    "vit_base_patch16_224_in21k", pretrained=True, img_size=1024
                )
                target_layers = [model.blocks[-1]]  # type: ignore
            else:
                # Load the trained checkpoint into the learner's network,
                # copying only tensors whose name AND shape both match.
                om = torch.load(ckp_path)  # type: ignore
                osd = om.state_dict()
                state_dict = learner._network.state_dict()
                for k, v in osd.items():
                    if k in state_dict and state_dict[k].shape == v.shape:
                        # if state_dict[k].shape == v.shape:
                        state_dict[k] = v
                learner._network.load_state_dict(state_dict, strict=False)
                model = ModelWrapper(learner._network)
                target_layers = [model.backbone.cur_adapter[-1]]  # type: ignore
                # target_layers = [model.backbone.blocks[-1]]  # type: ignore
                # target_layers = [model.backbone.blocks[-1], model.backbone.cur_adapter[-1]]  # type: ignore

            model.to(learner._device)
            model.eval()

            # Build the CAM extractor; only methods[0] is honored here.
            cam = __CAM[methods[0]](
                model=model,
                target_layers=target_layers,  # type: ignore
                reshape_transform=reshape_transform,
            )

            # targets=None lets the CAM library use the model's own
            # top prediction as the target class.
            grayscale_cam = cam(
                input_tensor=img,
                targets=None,
                eigen_smooth=args.eigen_smooth,
                aug_smooth=args.aug_smooth,
            )
            grayscale_cam = grayscale_cam[0, :]

            # # set pixel smaller than average to zero
            # # grayscale_cam[grayscale_cam < np.mean(grayscale_cam)] = 0
            # # Define the kernel size (e.g., 5x5 kernel for smoothing)
            # Box-filter the CAM to smooth it, then apply an exponential
            # contrast boost normalized by the map's maximum value.
            kernel_size = 3
            kernel = np.ones((kernel_size, kernel_size), np.float32) / (
                kernel_size**2
            )
            # Apply the filter using cv2.filter2D
            grayscale_cam = cv2.filter2D(grayscale_cam, -1, kernel)
            # grayscale_cam = cv2.medianBlur(grayscale_cam, 3)
            grayscale_cam = np.exp(grayscale_cam * 1.5) / np.exp(
                np.max(grayscale_cam * 1.5)
            )

            # NOTE(review): the blended overlay produced by
            # show_cam_on_image is immediately overwritten two statements
            # below with the re-normalized grayscale CAM, so the overlay is
            # currently unused — confirm whether that is intentional.
            cam_image = show_cam_on_image(
                rgb_img,
                grayscale_cam,
                use_rgb=True,
                image_weight=0.5,
                colormap=cv2.COLORMAP_JET,
            )
            # cam_image = grayscale_cam[..., None] * np.array(rgb_img * 255)
            cam_image = grayscale_cam / np.max(grayscale_cam)
            # plt_cam_image = cv2.cvtColor(cam_image, cv2.COLOR_BGR2RGB)
            plt_cam_image = cam_image
            cam_dict[model_name]["predict"] = plt_cam_image

            # Optionally compute a CAM conditioned on the ground-truth
            # label (custom model only); plotting it is unimplemented.
            if model_name != "timm" and need_target:
                grayscale_cam = cam(
                    input_tensor=img,
                    targets=target,
                    eigen_smooth=args.eigen_smooth,
                    aug_smooth=args.aug_smooth,
                )
                grayscale_cam = grayscale_cam[0, :]
                cam_image = show_cam_on_image(
                    rgb_img,
                    grayscale_cam,
                    use_rgb=True,
                    image_weight=0.7,
                    colormap=cv2.COLORMAP_JET,
                )
                plt_cam_image = cam_image
                cam_dict[model_name]["target"] = plt_cam_image

        # Plot layout: original image | timm CAM | custom-model CAM.
        keys = list(cam_dict.keys())
        keys.remove("timm")
        plt.figure(figsize=(30, 10))
        plt.subplot(1, 3, 1)
        plt.imshow(rgb_img)
        plt.title("Original Image", fontsize=30)
        plt.subplot(1, 3, 2)
        plt.imshow(cam_dict["timm"]["predict"])
        plt.title("timm CAM-predict", fontsize=30)

        mn = keys[0]
        plt.subplot(1, 3, 3)
        plt.imshow(cam_dict[mn]["predict"])
        plt.title(f"{mn} CAM-predict", fontsize=30)

        # Layouts for target CAMs, two custom models, or two CAM methods
        # were never implemented.
        if need_target:
            raise NotImplementedError
        if len(keys) == 2:
            raise NotImplementedError
        if len(methods) == 2:
            raise NotImplementedError

        # NOTE(review): the output directory must already exist, and
        # figures are never closed (plt.close), which accumulates memory
        # across many images.
        plt.savefig(os.path.join("visual/cam_images", save_pth))


if __name__ == "__main__":
    # Base experiment configuration (same CLI as main.py).
    # arg_list = [
    #     "--config",
    #     "exps/sdm/imr_inc10.json",
    #     "--prefix",
    #     "debug",
    #     "--device",
    #     "0",
    # ]
    arg_list = [
        "--config",
        "./exps/baseline/single/cub_inc10.json",
        "--prefix",
        "debug",
        "--device",
        "5",
    ]
    a = setup_parser().parse_args(arg_list)
    # CAM-specific options (see get_args).
    # b = get_args(["--model", "timm", "--method", "gradcam", "--cam_visual"])
    b = get_args(
        [
            "--model",
            "ss",
            "--method",
            "tokencam",
            "--cam_visual",
            "--ckp_path",
            "ckps/baseline/vit_base_patch16_224_in21k_baseSingle/cub/20241024170012/cub_0.pth",
        ]
    )

    # Merge the two namespaces; CAM options from `b` win on key collisions.
    merged_args = argparse.Namespace()
    for key, value in vars(a).items():
        setattr(merged_args, key, value)
    for key, value in vars(b).items():
        setattr(merged_args, key, value)

    # Overlay the JSON experiment config, then wrap so values are reachable
    # both as attributes and as dict keys.
    param = load_json(merged_args.config)
    args = merge_config(merged_args, param)
    args = NamespaceDict(**args)

    args["train_seed"] = args["seed"][0]
    _set_random(args["train_seed"])
    _set_device(args)
    data_manager = DataManager(
        args["dataset"],
        args["shuffle"],
        args["seed"],
        args["init_cls"],
        args["inc_cls"],
        args,
    )

    args["nb_classes"] = data_manager.nb_classes  # update args
    args["nb_tasks"] = data_manager.nb_tasks
    print(args)

    # `learner` is read as a module-level global inside visual().
    learner = get_learner(args["learner_name"], args, data_manager)
    learner.data_manager = data_manager
    learner.before_task()

    # Visualize one batch from the test split of the first task only.
    tgt_task = 0
    inc_cls = args["inc_cls"]
    _total_classes = (tgt_task + 1) * inc_cls
    test_dataset = data_manager.get_dataset(
        np.arange(0, _total_classes), source="test", mode="test"
    )
    test_loader = DataLoader(
        test_dataset,  # type: ignore
        batch_size=_batch_size,
        shuffle=False,
        num_workers=4,
    )
    _, x, y = next(iter(test_loader))
    # Derive the model key and CAM method from the parsed args instead of
    # duplicating the hard-coded literals ("ss" / "tokencam") here.
    visual(
        imgs=x,
        targets=y,
        models={"timm": None, args["model"]: None},
        methods=[args["method"]],
        ckp_path=args["ckp_path"],
        device=learner._device,
        need_target=False,
    )
