import os
from random import sample  # NOTE(review): unused, and shadowed by a local `sample` dict inside run()
import sys
# Work around the "duplicate OpenMP runtime" abort that occurs when PyTorch
# and another library each bundle their own libiomp (common with conda).
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
# Make the project root (three levels up) importable when run as a script.
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import collections
import torch
# from torch.utils.data import DataLoader
from tqdm import tqdm
from data_p import make_basic_dataset, demo_transforms
# from model import ParsingReidModel
# from main import make_config, build_model

# from vehicle_reid_pytorch.utils import load_checkpoint, save_checkpoint, merge_configs, get_host_ip, read_rgb_image
from pytorch_grad_cam import GradCAM, ScoreCAM, GradCAMPlusPlus, AblationCAM, XGradCAM, EigenCAM, LayerCAM
from pytorch_grad_cam.utils.image import show_cam_on_image
import matplotlib.pyplot as plt
from utils.visual import read_rgb_image
# from modeling.baseline_pd import Baseline as Baseline_pd
from modeling import build_model
from torch.utils.data import DataLoader
from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget


class MY_PVASFF(torch.nn.Module):
    """Thin wrapper around the PVASFF re-id backbone for pytorch-grad-cam.

    The backbone returns a tuple; pytorch-grad-cam needs a module whose
    forward returns the single output the CAM should explain, so this
    wrapper forwards the image and returns only element ``[1]`` (the
    feature output) of the backbone's tuple.

    Args:
        cfg: project configuration object forwarded to ``build_model``.
        classes: identity-class count. NOTE(review): currently ignored —
            the class count is hard-coded to 576 (presumably the VeRi-776
            identity count; confirm before reusing with another dataset).
    """

    def __init__(self, cfg, classes):
        super().__init__()
        # NOTE(review): 576 is hard-coded and `classes` is unused — kept
        # for interface compatibility with existing callers.
        self.PPVASFF = build_model(cfg, 576).to('cuda')
        # Per-forward context slot (e.g. the current dataloader batch);
        # assigned externally by the caller before invoking the model.
        self.batch = None

    def forward(self, image):
        """Run the backbone and return the output CAM is computed against.

        ``output[0]`` is the classification score; ``output[1]`` is the
        feature output that Grad-CAM is applied to here.
        """
        output = self.PPVASFF(image)
        return output[1]


def run(cfg, model_path, use_attention, preview_model):
    """Visualise where a trained PVASFF re-id model looks, via Grad-CAM.

    Builds the validation dataset, loads the checkpoint at ``model_path``
    into a freshly constructed model (tolerant partial load), then renders
    a CAM heatmap for every image of the first validation batch and saves
    each figure as a PNG under a hard-coded output directory.

    Args:
        cfg: project configuration object forwarded to ``build_model`` and
            ``make_basic_dataset``.
        model_path: path to a ``.pth`` checkpoint containing a
            ``'state_dict'`` entry.
        use_attention: when True, target the GCN block of the token branch;
            when False, target ``base.layer3[-1]``.
        preview_model: when True, print the model and return without
            computing any CAM.

    Side effects: reads dataset files, writes PNGs, opens matplotlib
    windows. Requires a CUDA device.
    """
    device = 'cuda:0'

    from data_p import demo_transforms as demo_trans
    # Resize pipeline, used both by the loader below and to rebuild the raw
    # RGB image that the heatmap is overlaid on.
    val_transform = demo_trans.get_validation_augmentations((256, 256))

    # NOTE(review): every path and hyper-parameter below is hard-coded for
    # one machine; they mirror the cfg.data.* values they replaced.
    train_dataset, valid_dataset, meta_dataset = make_basic_dataset(
        cfg,
        "/home/ubuntu/yuyu/pven/example/outputs/veri776.pkl",  # cfg.data.pkl_path
        (256, 256),  # cfg.data.train_size
        (256, 256),  # cfg.data.valid_size
        10,          # cfg.data.pad
        test_ext='',
        re_prob=0.5,
        with_mask=False,
    )

    val_loader = DataLoader(valid_dataset, batch_size=4, num_workers=1,
                            pin_memory=True, shuffle=False)

    model = MY_PVASFF(cfg, 576)  # 576 identities; see MY_PVASFF note

    # ---- Tolerant checkpoint loading -----------------------------------
    state_metas = torch.load(model_path, 'cpu')
    predict_dict = state_metas['state_dict']
    model_dict = model.PPVASFF.state_dict()

    # Keep only checkpoint tensors whose name AND shape match the model.
    new_dict = collections.OrderedDict()
    for k, v in predict_dict.items():
        if k in model_dict and v.size() == model_dict[k].size():
            new_dict[k] = v
    print('loading params {}'.format(new_dict.keys()))

    # Fall back to the model's freshly initialised weights for the rest,
    # so load_state_dict receives a complete dict.
    for k, v in model_dict.items():
        if k not in new_dict:
            new_dict[k] = v
    model_dict.update(new_dict)
    model.PPVASFF.load_state_dict(model_dict)
    model.eval()  # hoisted out of the batch loop; CAM only needs eval mode

    if preview_model:
        # Checked once up front (instead of inside the loop) so it also
        # works when the validation loader is empty.
        print(model)
        print('\n Please remove `--preview-model` to get the CAM.')
        return

    # Layer whose activations/gradients the CAM is computed from.
    if not use_attention:
        # BUG FIX: the original set this target but never built a CAM for
        # it, so the non-attention path silently did nothing; both paths
        # now share the CAM code below.
        target_layers = [model.PPVASFF.base.layer3[-1]]
    else:
        # GCN block of the token branch. Other candidates previously tried:
        # base.layer4, reduction_layers[1], and spatial_attens[j][-1]
        # (the latter with LayerCAM, one CAM per branch j in 0..3).
        target_layers = [model.PPVASFF.base.blocks_token_only[0].gcnl_2l[0]]

    for idx, batch in tqdm(enumerate(val_loader)):
        # Move every tensor in the batch to the GPU.
        for name, item in batch.items():
            if isinstance(item, torch.Tensor):
                batch[name] = item.to(device)
        input_tensor = batch['image']
        model.batch = batch  # some model variants read the mask from here

        cam = GradCAM(model=model, target_layers=target_layers, use_cuda=False)
        grayscale_cam = cam(input_tensor=input_tensor)  # one map per image

        for i in range(grayscale_cam.shape[0]):
            grayscale_ = grayscale_cam[i, :]

            # Re-read and re-transform the raw image so the overlay matches
            # the network input resolution.
            x = read_rgb_image(batch["image_path"][i])
            samples = val_transform(image=x)
            rgb_img = samples['image'] / 255.0

            visualization = show_cam_on_image(rgb_img, grayscale_, use_rgb=True)
            plt.imshow(visualization)
            # NOTE(review): the [44:-4] slice assumes a fixed dataset path
            # prefix length — confirm if the dataset location changes.
            plt.savefig('/home/ubuntu/yuyu/UFDN-Reid/heat_map_picture/' + str('33') + str(batch["image_path"][i][44:-4]) + 'spatial_attens' + '.png')
            plt.show()

        # BUG FIX: the original stopped here with ``raise "ss"`` — raising a
        # str is itself a TypeError in Python 3. Keep the "first batch only"
        # behaviour, but exit cleanly instead of via a traceback.
        return


if __name__ == '__main__':
    # Trained checkpoint to visualise. Other checkpoints previously tried:
    # 8374_r1h_model110.pth, no_feat_ibn120model, 8059_model110.pth,
    # model_141_attention.pth / model_147_no_attention.pth (remote).
    no_model_path = '/home/ubuntu/yuyu/pven6005/outputs/veri776_b64_pven/8340_no_vis_ibn120model.pth'
    use_attention = True
    preview_model = False
    # BUG FIX: run() takes (cfg, model_path, use_attention, preview_model)
    # but was called without a cfg, raising TypeError immediately. Rebuild
    # the config the way the commented-out code did
    # (`from main import make_config` / `cfg = make_config()`).
    from main import make_config  # project-local config factory
    cfg = make_config()
    run(cfg, no_model_path, use_attention, preview_model)
