import sys
import cv2  # OpenCV for image processing
import numpy as np  # NumPy for numerical operations
import argparse
import timm
import matplotlib.pyplot as plt
import torch
from torch.utils.data import DataLoader

from main import setup_parser, merge_config, load_json
from utils.data_manager import DataManager
from utils import factory
from trainer import _set_device, _set_random

from pytorch_grad_cam import GradCAM
from visual.token_cam import TokenCAM
from pytorch_grad_cam.utils.image import show_cam_on_image


# Registry mapping the `--method` CLI value to its CAM implementation class;
# looked up in the `__main__` section below as __CAM[args.method].
__CAM = {
    "gradcam": GradCAM,
    "tokencam": TokenCAM,
}


class ModelWrapper(torch.nn.Module):
    """Adapter exposing only the ``"logits"`` entry of the wrapped model's
    dict output, so CAM libraries — which expect ``forward`` to return a
    plain logits tensor — can drive the model unchanged.
    """

    def __init__(self, model):
        super(ModelWrapper, self).__init__()
        # Turn gradients ON for every parameter so CAM back-propagation
        # can reach the target layers.
        self._set_trainable(model, True)
        self.original_model = model

    def _set_trainable(self, model, requires_grad):
        # Toggle requires_grad on all parameters of `model`.
        for param in model.parameters():
            param.requires_grad = requires_grad

    def forward(self, x):
        # The wrapped model returns a dict; CAM tooling needs the raw logits.
        return self.original_model(x)["logits"]

    def __getattr__(self, name):
        # Delegate attribute access to the original model if it's not found in the wrapper
        # NOTE: __getattr__ only fires after normal lookup fails. Reading
        # self.original_model here re-enters __getattr__ with
        # name == "original_model"; that call takes the second branch and
        # resolves via nn.Module.__getattr__ (submodules live in
        # self._modules), so there is no infinite recursion.
        if name != "original_model" and hasattr(self.original_model, name):
            return getattr(self.original_model, name)
        return super(ModelWrapper, self).__getattr__(name)


class NamespaceDict(argparse.Namespace, dict):
    """Hybrid of ``argparse.Namespace`` and ``dict``.

    Every key is reachable both as ``obj["key"]`` and ``obj.key``, and a
    mutation through either interface is mirrored to the other. Used below
    so the merged argparse result can be consumed by code that indexes it
    like a plain dict (e.g. ``args["dataset"]``).

    NOTE(review): the dict storage and the attribute ``__dict__`` are kept
    in sync manually; the exact statement order in each dunder matters
    (e.g. ``__delattr__`` tolerates a missing dict key but not a missing
    attribute) — do not reorder.
    """

    def __init__(self, *args, **kwargs):
        # Initialize dict with given arguments
        dict.__init__(self, *args, **kwargs)
        # Set all keys as attributes
        for key, value in self.items():
            super().__setattr__(key, value)

    def __getitem__(self, key):
        # Allow dict-style access
        return dict.__getitem__(self, key)

    def __setitem__(self, key, value):
        # Allow dict-style setting and also set attributes
        dict.__setitem__(self, key, value)
        super().__setattr__(key, value)

    def __delitem__(self, key):
        # Allow deletion by both dict-style and attribute-style
        dict.__delitem__(self, key)
        super().__delattr__(key)

    def __getattr__(self, key):
        # Called when accessing non-existent attributes; fallback to dict
        try:
            return dict.__getitem__(self, key)
        except KeyError:
            raise AttributeError(
                f"'NamespaceDict' object has no attribute '{key}'"
            )

    def __setattr__(self, key, value):
        # Set both dict key and attribute when using dot notation
        dict.__setitem__(self, key, value)
        super().__setattr__(key, value)

    def __delattr__(self, key):
        # Allow attribute-style deletion
        try:
            dict.__delitem__(self, key)
        except KeyError:
            # Key may exist only as an attribute; still delete that below.
            pass
        super().__delattr__(key)


def reshape_transform(tensor, height=14, width=14):
    """Reshape ViT token activations into a CNN-style feature map for CAM.

    Drops the leading CLS token, lays the remaining patch tokens out on a
    ``height`` x ``width`` grid, and moves channels first.

    Args:
        tensor: Token activations of shape (B, 1 + num_patches, C); the
            first token is assumed to be the CLS token.
        height: Patch-grid height (default 14, i.e. a 224px/16px ViT).
        width: Patch-grid width.

    Returns:
        Tensor of shape (B, C, height, width).
    """
    num_patches = tensor.size(1) - 1  # exclude the CLS token
    # Generalization: if the requested grid does not match the token count
    # but the count is a perfect square, infer the grid size. The original
    # hard-coded 14x14 would fail to reshape e.g. a ViT built with
    # img_size=1024 (64x64 = 4096 patch tokens, see the timm branch below).
    if num_patches != height * width:
        side = int(round(num_patches ** 0.5))
        if side * side == num_patches:
            height = width = side
    result = tensor[:, 1:, :].reshape(
        tensor.size(0), height, width, tensor.size(2)
    )
    # (B, H, W, C) -> (B, C, H, W)
    return result.permute(0, 3, 1, 2)


def get_args(args=None):
    """Parse the CAM-visualization command-line options.

    Args:
        args: Optional list of argument strings; defaults to
            ``sys.argv[1:]`` when None. A copy is taken so the caller's
            list is never mutated.

    Returns:
        argparse.Namespace with ``aug_smooth``, ``eigen_smooth``,
        ``model``, ``method`` and ``cam_visual`` attributes.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--aug_smooth",
        action="store_true",
        help="Apply test time augmentation to smooth the CAM",
    )
    parser.add_argument(
        "--eigen_smooth",
        action="store_true",
        # Fixed typo ("principle componenet") and the missing space where
        # the two implicitly-concatenated string pieces joined.
        help="Reduce noise by taking the first principal component "
        "of cam_weights*activations",
    )

    parser.add_argument(
        "--model",
        type=str,
        default="mine",
        help="choose timm or custom method",
    )
    parser.add_argument(
        "--method",
        type=str,
        default="gradcam",
        help="choose cam methods",
    )

    parser.add_argument(
        "--cam_visual",
        action="store_true",
        help="whether to visualize the cam",
    )

    if args is None:
        # args default to the system args
        args = sys.argv[1:]
    else:
        # make sure that args are mutable
        args = list(args)

    return parser.parse_args(args)


if __name__ == "__main__":
    # Hard-coded debug invocation: base experiment config + device.
    arg_list = [
        "--config",
        "exps/sdm/imr_inc10.json",
        "--prefix",
        "debug",
        "--device",
        "3",
    ]
    a = setup_parser().parse_args(arg_list)
    # (The original parsed a "timm"/"gradcam" combination first and
    # immediately overwrote it — that dead call is removed.)
    b = get_args(["--model", "ss", "--method", "tokencam", "--cam_visual"])

    # Merge the two Namespaces; values from `b` win on key collisions.
    merged_args = argparse.Namespace()
    for key, value in vars(a).items():
        setattr(merged_args, key, value)
    for key, value in vars(b).items():
        setattr(merged_args, key, value)

    param = load_json(merged_args.config)
    args = merge_config(merged_args, param)
    args = NamespaceDict(**args)

    args["train_seed"] = args["seed"][0]
    _set_random(args["train_seed"])
    _set_device(args)
    data_manager = DataManager(
        args["dataset"],
        args["shuffle"],
        args["seed"],
        args["init_cls"],
        args["inc_cls"],
        args,
    )

    args["nb_classes"] = data_manager.nb_classes  # update args
    args["nb_tasks"] = data_manager.nb_tasks
    print(args)

    # NOTE(review): the original called a bare `get_learner`, which is not
    # defined or imported anywhere in this file and would raise NameError.
    # `factory` is imported above and otherwise unused, so it is presumably
    # the provider — confirm `factory.get_learner`'s signature.
    learner = factory.get_learner(args["learner_name"], args)
    learner.data_manager = data_manager
    learner.before_task()

    # Build a test loader over every class seen up to task `tgt_task`.
    tgt_task = 0
    inc_cls = args["inc_cls"]
    _total_classes = (tgt_task + 1) * inc_cls
    test_dataset = data_manager.get_dataset(
        np.arange(0, _total_classes), source="test", mode="test"
    )
    test_loader = DataLoader(
        test_dataset,  # type: ignore
        batch_size=args["batch_size"],
        shuffle=False,
        num_workers=4,
    )
    _, x, y = next(iter(test_loader))

    # Pick one sample from the batch to visualize.
    idx = -1
    # [C, H, W] -> [1, C, H, W]
    input_img = x[idx].unsqueeze(0).to(learner._device)
    # [1] -> [1, 1]
    target = y[idx].unsqueeze(0).to(learner._device)
    # Convert the CHW tensor to an HWC RGB array for overlay rendering.
    # NOTE(review): assumes x is already a float image in a range
    # show_cam_on_image accepts (typically [0, 1]) — confirm the dataset's
    # test-mode transform.
    rgb_img = cv2.cvtColor(
        input_img.cpu().numpy().squeeze().transpose(1, 2, 0), cv2.COLOR_BGR2RGB
    )

    model_name = args.model

    if model_name == "timm":
        # alert it is a test example
        print("\n" + "=" * 60)
        print("Using timm model")
        print("=" * 60 + "\n")
        model = timm.create_model(
            "vit_base_patch16_224.augreg_in21k", pretrained=True, img_size=1024
        )
        target_layers = [model.blocks[-1]]  # type: ignore
    else:
        # Load a checkpointed model and copy over every parameter whose
        # name AND shape match the freshly-built network.
        om = torch.load(
            "ckps/sdm/vit_base_patch16_224_in21k_sdm/imagenetr/20241011162216/imagenetr_7.pth"
        )
        osd = om.state_dict()
        state_dict = learner._network.state_dict()
        for k, v in osd.items():
            if k in state_dict and state_dict[k].shape == v.shape:
                state_dict[k] = v
        msg = learner._network.load_state_dict(state_dict, strict=False)
        model = ModelWrapper(learner._network)
        # ENABLE gradient computation for adapter parameters so CAM can
        # back-propagate through them (the original comment wrongly said
        # "Disable" while setting True).
        for k, param in model.named_parameters():
            if "adapter" in k:
                param.requires_grad = True
        # Two experimental target-layer choices (last block + adapter,
        # adapter[-3]) were dead overwrites in the original; only the
        # final choice is kept.
        target_layers = [model.backbone.blocks[-1]]  # type: ignore
    model.to(learner._device)
    model.eval()

    print("\n" + "=" * 60)
    print(f"Using {args.method} method")
    print("=" * 60 + "\n")
    cam = __CAM[args.method](
        model=model,
        target_layers=target_layers,  # type: ignore
        reshape_transform=reshape_transform,
    )

    # AblationCAM and ScoreCAM have batched implementations.
    # You can override the internal batch size for faster computation.
    grayscale_cam = cam(
        input_tensor=input_img,
        targets=target,
        eigen_smooth=args.eigen_smooth,
        aug_smooth=args.aug_smooth,
    )

    # Here grayscale_cam has only one image in the batch
    grayscale_cam = grayscale_cam[0, :]

    cam_image = show_cam_on_image(
        rgb_img, grayscale_cam, use_rgb=False, image_weight=0.5
    )
    cv2.imwrite("GradCAM_cam.jpg", cam_image)
    # cv2 produced BGR; convert for matplotlib's RGB display.
    plt_cam_image = cv2.cvtColor(cam_image, cv2.COLOR_BGR2RGB)
    plt.figure(figsize=(20, 10))
    plt.subplot(1, 2, 1)
    plt.imshow(rgb_img)
    plt.title("Original Image", fontsize=30)
    plt.subplot(1, 2, 2)
    plt.imshow(plt_cam_image)
    plt.title(f"{args.method} CAM", fontsize=30)
    plt.savefig("cam.jpg")
