from pytorch_grad_cam import (
    DeepFeatureFactorization,
    GradCAM, 
    ScoreCAM, 
    GradCAMPlusPlus, 
    AblationCAM, 
    RandomCAM, 
    LayerCAM, 
    FullGrad, 
    EigenCAM, 
    XGradCAM
)   
from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
from pytorch_grad_cam.utils.image import (
    show_cam_on_image, 
    show_factorization_on_image, 
    preprocess_image, 
    deprocess_image
)
from torchvision import transforms
import torch
from PIL import Image
import numpy as np
import json
import cv2
import warnings
warnings.filterwarnings('ignore')
from Efficientnetv2 import (
    efficientnetv2_s,
    efficientnetv2_m,
    efficientnetv2_l)  # type: ignore


def load_image(image_path, target_size=384):
    """Load an RGB image and prepare the three views used downstream.

    Center-crops the image to ``target_size`` and returns:
      * the cropped PIL image (for side-by-side display),
      * a float32 RGB array scaled to [0, 1] (for CAM overlays),
      * a normalized 4-D tensor with a batch dimension of one (model input).
    """
    pil_image = Image.open(image_path).convert('RGB')
    crop = transforms.CenterCrop(target_size)
    # Model input pipeline: crop -> tensor -> normalize with mean/std 0.5.
    to_model_input = transforms.Compose([
        crop,
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
    ])
    input_tensor = to_model_input(pil_image).unsqueeze(0)
    # Cropped-only PIL view, plus its float representation in [0, 1].
    padded_image = transforms.Compose([crop])(pil_image)
    rgb_img_float = np.float32(padded_image) / 255.
    return padded_image, rgb_img_float, input_tensor


def load_label(concept_scores, top_k=2, label_path="class_indices.json"):
    """Build display labels for the top scoring categories of each concept.

    Args:
        concept_scores: (n_concepts, n_classes) array of per-concept class
            scores (e.g. softmax outputs).
        top_k: number of highest-scoring classes to keep per concept.
        label_path: JSON file mapping class-index strings to label names;
            only the text before the first comma of each name is shown.
            Defaults to the previously hard-coded "class_indices.json".

    Returns:
        A list of n_concepts strings, each joining top_k "name:score"
        entries with newlines.
    """
    with open(label_path, "r") as file_json:
        labels = json.load(file_json)
    # Column indices of the top_k classes per concept, highest score first.
    concept_categories = np.argsort(concept_scores, axis=1)[:, ::-1][:, :top_k]
    concept_labels_topk = []
    for concept_index in range(concept_categories.shape[0]):
        concept_labels = []
        for category in concept_categories[concept_index, :]:
            score = concept_scores[concept_index, category]
            label = f"{labels[str(category)].split(',')[0]}:{score:.2f}"
            concept_labels.append(label)
        concept_labels_topk.append("\n".join(concept_labels))
    return concept_labels_topk


def load_model(weight_path=r"./weights/model-41.pth", device="cuda:0"):
    """Load an EfficientNetV2-L classifier checkpoint in eval mode.

    Args:
        weight_path: path to the state-dict checkpoint file.
        device: preferred device string; falls back to CPU when CUDA is
            unavailable. Used as the map_location when loading weights.

    Returns:
        The model with weights loaded, switched to eval() mode.
    """
    # BUG FIX: the parameter was previously shadowed by a hard-coded
    # reassignment on the first line, making weight_path impossible to
    # override from callers.
    device = torch.device(device if torch.cuda.is_available() else "cpu")
    state_dict = torch.load(weight_path, map_location=device)
    model = efficientnetv2_l(num_classes=3)
    # NOTE(review): the model is intentionally not moved to `device`
    # (the original had `.to(device)` commented out) — confirm inference
    # on CPU tensors is the intended behavior.
    model.load_state_dict(state_dict)
    model.eval()
    return model


# Draw the different feature regions separately (deep feature factorization)
def draw_split_feature_visualize_image(
    image_path,
    weight_path,
    out_path,
    device="cuda:0",
    target_size=384,
    n_components=5,
    top_k=2
):
    """Run Deep Feature Factorization and save a side-by-side visualization.

    Factorizes the last conv feature map into ``n_components`` concepts,
    labels each concept with its ``top_k`` classifier predictions, and
    writes the original crop next to the concept overlay to ``out_path``.

    Args:
        image_path: input image file.
        weight_path: model checkpoint path (forwarded to load_model).
        out_path: destination file for the composite visualization.
        device: preferred device string for checkpoint loading.
        target_size: center-crop size applied to the input image.
        n_components: number of DFF concepts to extract.
        top_k: classifier labels shown per concept.
    """
    # Load the model and the three image views (PIL crop, float RGB, tensor).
    model = load_model(weight_path, device=device)
    padded_image, rgb_img_float, input_tensor = load_image(image_path, target_size)
    # Deep feature factorization on the model's last feature layer, with
    # the final fully-connected classifier used to score each concept.
    target_layer = model.head.project_conv  # last feature layer of the model
    classifier = model.head.classifier      # final fully-connected classifier
    dff = DeepFeatureFactorization(
        model=model,
        target_layer=target_layer,
        computation_on_concepts=classifier)
    concepts, batch_explanations, concept_outputs = dff(input_tensor, n_components)
    concept_outputs = torch.softmax(torch.from_numpy(concept_outputs), axis=-1).numpy()
    # Map each concept's top classes to human-readable "name:score" labels.
    concept_label_strings = load_label(concept_outputs, top_k=top_k)
    visualization = show_factorization_on_image(
        rgb_img_float,
        batch_explanations[0],
        image_weight=0.3,
        concept_labels=concept_label_strings)
    array = np.hstack((padded_image, visualization))
    # Downscale large composites so saved previews stay small.
    if array.shape[0] > 500:
        array = cv2.resize(array, (array.shape[1] // 4, array.shape[0] // 4))
    # BUG FIX: saving previously happened only inside the resize branch,
    # so images 500px tall or less were silently never written out.
    Image.fromarray(array).save(out_path)
    return


def cam_draw_feature_heat_map_image(
    image_path,
    weight_path,
    out_path,
    device="cuda:0",
    class_indexs=(0, 1),
    target_size=384):
    """Compute a CAM heat map for the given classes and save the overlay.

    Args:
        image_path: input image file.
        weight_path: model checkpoint path (forwarded to load_model).
        out_path: destination file for the heat-map overlay (written by cv2).
        device: preferred device string for checkpoint loading.
        class_indexs: class indices used as CAM targets (default now an
            immutable tuple instead of a shared mutable list).
        target_size: center-crop size applied to the input image.
    """
    model = load_model(weight_path, device=device)
    padded_image, rgb_img_float, input_tensor = load_image(
        image_path, target_size=target_size)
    target_layers = [model.head[0]]
    # EigenCAM was selected; the alternatives below were explored during
    # development and can be swapped in with a one-line change.
    # cam = FullGrad(model=model, target_layers=target_layers)
    # cam = XGradCAM(model=model, target_layers=target_layers)
    cam = EigenCAM(model=model, target_layers=target_layers)
    # cam = GradCAM(model=model, target_layers=target_layers)
    # cam = ScoreCAM(model=model, target_layers=target_layers)
    # cam = LayerCAM(model=model, target_layers=target_layers)
    # cam = AblationCAM(model=model, target_layers=target_layers)
    # cam = RandomCAM(model=model, target_layers=target_layers)
    # cam = GradCAMPlusPlus(model=model, target_layers=target_layers)
    targets = [ClassifierOutputTarget(class_index) for class_index in class_indexs]
    # BUG FIX: load_image already returns a batched (1, C, H, W) tensor;
    # the previous extra torch.unsqueeze produced an invalid 5-D input.
    grayscale_cam = cam(input_tensor=input_tensor, targets=targets)
    grayscale_cam = grayscale_cam[0, :]
    visualization = show_cam_on_image(rgb_img_float, grayscale_cam, use_rgb=True)
    # BUG FIX: cv2.imwrite expects BGR ordering, but use_rgb=True returns
    # RGB — convert so the saved heat-map colors are not channel-swapped.
    cv2.imwrite(out_path, cv2.cvtColor(visualization, cv2.COLOR_RGB2BGR))
    return


if __name__=="__main__":
    # Import-only module for now: no CLI entry point is wired up yet.
    pass