import os
from random import sample
import sys
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import collections
import torch
# from torch.utils.data import DataLoader
from tqdm import tqdm
from data_p import make_basic_dataset, demo_transforms
# from model import ParsingReidModel
# from main import make_config, build_model

# from vehicle_reid_pytorch.utils import load_checkpoint, save_checkpoint, merge_configs, get_host_ip, read_rgb_image
from pytorch_grad_cam import GradCAM, ScoreCAM, GradCAMPlusPlus, AblationCAM, XGradCAM, EigenCAM, LayerCAM
from pytorch_grad_cam.utils.image import show_cam_on_image
import matplotlib.pyplot as plt
from utils.visual import read_rgb_image
# from modeling.baseline_pd import Baseline as Baseline_pd
from modeling import build_model
from torch.utils.data import DataLoader
from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
from data.datasets.dataset_loader import read_image
from data.transforms import build_transforms
# import PIL, PIL.Image    
import numpy as np



class MY_PVASFF(torch.nn.Module):
    """Thin wrapper around the project ReID model for CAM visualisation.

    pytorch_grad_cam drives models through a single-tensor ``forward``;
    this wrapper adapts the project model to that calling convention and
    returns the tensor at ``output[1]``, which the CAM target layers hook.
    """

    def __init__(self, cfg, classes=576):
        """Build and wrap the underlying model.

        Args:
            cfg: project model configuration, passed straight to
                ``build_model``.
            classes: number of identity classes for the classifier head.
                Defaults to 576, the value previously hard-coded here, so
                existing callers keep the exact same behaviour.
        """
        super().__init__()
        # NOTE(review): device is hard-coded — assumes a CUDA device exists.
        self.PPVASFF = build_model(cfg, classes).to('cuda')
        # Optional sample batch a caller may attach (e.g. to supply a mask).
        self.batch = None

    def forward(self, image):
        """Run the wrapped model and expose the CAM-relevant output.

        ``output[0]`` is presumably the classification score ('cls_score' —
        TODO confirm against build_model); CAM uses ``output[1]``.
        """
        output = self.PPVASFF(image)
        return output[1]


def run(cfg, model_path, use_attention, preview_model, local_rank):
    """Load checkpoint weights into the ReID model and save CAM heat maps.

    Args:
        cfg: project config object (consumed by make_data_loader /
            build_transforms; cfg.MODEL.DEVICE_ID is exported to
            CUDA_VISIBLE_DEVICES).
        model_path: path to a checkpoint whose 'state_dict' entry holds
            the model weights.
        use_attention: if False, hook a ResNet layer3 block; if True, hook
            the attention-branch layers for the CAM.
        preview_model: if True, print the model and return without CAM.
        local_rank: CUDA device index used for the distributed init.

    Side effects: initialises torch.distributed (NCCL), writes .png heat
    maps to hard-coded absolute paths, and aborts after the first image
    by raising.

    NOTE(review): the __main__ block calls run() with only three
    positional arguments, which does not match this five-parameter
    signature — confirm the intended entry point.
    """
    val_transform = demo_transforms.get_validation_augmentations((256, 256))
    # cfg = make_config()
    # device = 'cpu'
    device = 'cuda:0'
    # cfg.model.attention = use_attention
    # cfg = merge_configs(cfg, config_file, cmd_configs)
    # cfg.freeze()
    # train_dataset, valid_dataset, meta_dataset = make_basic_dataset(cfg.data.pkl_path,
    #                                                                 cfg.data.train_size,
    #                                                                 cfg.data.valid_size,
    #                                                                 cfg.data.pad,
    #                                                                 test_ext=cfg.data.test_ext,
    #                                                                 re_prob=cfg.data.re_prob,
    #                                                                 with_mask=cfg.data.with_mask, )
    from data import make_data_loader
    # NCCL init via env:// — requires MASTER_ADDR/MASTER_PORT etc. in env.
    torch.cuda.set_device(local_rank)
    torch.distributed.init_process_group(backend='nccl', init_method='env://')
    # NOTE(review): setting CUDA_VISIBLE_DEVICES *after* set_device() is too
    # late to influence device selection for this process — verify intent.
    os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    train_loader, val_loader, num_train, num_query, num_gallery, num_classes = make_data_loader(cfg)
    train_transforms = build_transforms(cfg, is_train=False)
    # from data_p import demo_transforms as demo_trans
    # val_transform = demo_trans.get_validation_augmentations((256, 256))
    # preprocessing = demo_trans.get_preprocessing()

    # train_dataset, valid_dataset, meta_dataset = make_basic_dataset(cfg, "/home/ubuntu/yuyu/pven/pven6005/examples/outputs/veri776.pkl", #cfg.data.pkl_path,vehicleid.pkl
    #                                                                 (256, 256), #cfg.data.train_size,
    #                                                                 (256, 256), #cfg.data.valid_size,
    #                                                                 10, #cfg.data.pad,
    #                                                                 test_ext='',  #test_ext=cfg.data.test_ext,_800
    #                                                                 re_prob=0.5, #re_prob=cfg.data.re_prob,
    #                                                                 with_mask=False, #with_mask=cfg.data.with_mask,
    #                                                                 )

    # val_loader = DataLoader(valid_dataset, batch_size=4, num_workers=8,  # cfg.data.test_num_workers
    #                           pin_memory=True, shuffle=False)
    # sample = {'filename': ['0004540.jpg'],
    #                   'image_path': ['/home/ubuntu/yuyu/un-struct-gitee/0337_c007_00035115_0.jpg'],   #image_query  image_test  VeRi/image_test/0096_c015_00026665_0.jpg
    #                 #   'image_path': ['/home/ubuntu/yuyu/pven6005/0014_c012_00040755_0.jpg'],   0627_c004_00015275_0    0112_c010_00007710_0
    #                 #   'image_path': ['/home/ubuntu/yuyu/pven6005/0486_c007_00040755_0.jpg'],   0153_c001_00083845_0
    #                   'id': '27', 'cam': '017',
    #                   'mask_path': '/home/ubuntu/yuyu/datasets/veri776_masks/gallery/0582_c006_00028835_0.png', 'image':''}     # gallery
    #                 #   'mask_path': '/home/ubuntu/yuyu/pven6005/0014_c012_00040755_0_mask.png', 'image':''}
    #                 #   'mask_path': '/home/ubuntu/yuyu/pven6005/0486_c007_00040755_0_mask.png', 'image':''}
    # sample['image'] = read_rgb_image('/home/ubuntu/yuyu/un-struct-gitee/0337_c007_00035115_0.jpg')

    # import cv2
    # import numpy as np
    # mask = cv2.imread(sample["mask_path"], cv2.IMREAD_GRAYSCALE)
    # mask = [mask == v for v in range(5)]
    # mask = np.stack(mask, axis=-1).astype('float32')
    # sample["mask"] = mask

    # Data augmentation
    # sample = val_transform(**sample)
    # preprocessing
    # sample = preprocessing(**sample)



    # valid_loader = DataLoader(valid_dataset, batch_size=1,    #cfg.data.batch_size,
    #                           num_workers=cfg.data.test_num_workers, pin_memory=True, shuffle=False)

    model = MY_PVASFF(cfg, 576)    #meta_dataset.num_train_ids)
    #model.batch = sample
    # state_dict = torch.load(model_path, 'cpu')

    # Load checkpoint on CPU; weights live under the 'state_dict' key.
    state_metas = torch.load(model_path, 'cpu')
    predict_dict = state_metas['state_dict']

    model_dict = model.PPVASFF.state_dict()
    new_dict = collections.OrderedDict()
        # print(predict_dict.keys())
        # print(model_dict.keys())
        # raise "ss"
        # exit()
    # for k, v in predict_dict.items():
    #     if k[7:] in model.state_dict().keys() and v.size() == model.state_dict()[k[7:]].size():
    #         new_dict[k[7:]] = v

    # Keep only checkpoint tensors whose name AND shape match the model.
    for k, v in predict_dict.items():
        if k in model.PPVASFF.state_dict().keys() and v.size() == model.PPVASFF.state_dict()[k].size():
            new_dict[k] = v


    print('loading params {}'.format(new_dict.keys()))
        # # raise "sss"
    # For parameters the checkpoint did not provide, fall back to the
    # model's own current tensors so load_state_dict sees a full dict.
    for k, v in model_dict.items():
        if k not in new_dict.keys():
            new_dict[k] = v

    model_dict.update(new_dict)
    model.PPVASFF.load_state_dict(model_dict)


    # model.PPVASFF.load_state_dict(state_dict, strict=False)
    # Disabled path: would iterate the validation loader instead.
    if False:
        for idx, batch in tqdm(enumerate(val_loader)):
            for name, item in batch.items():
                if isinstance(item, torch.Tensor):
                    batch[name] = item.to(device)
            input_tensor = batch['image']

            model.batch = batch
    else:
        for i, (inputs, pids, camids, tids, distill_map, _) in enumerate(train_loader):

            inputs = inputs.cuda()
            pids = pids.cuda()
            tids = tids.cuda()
            # NOTE(review): these .to(device) moves duplicate the .cuda()
            # calls above when device == 'cuda:0'.
            inputs = inputs.to(device) if torch.cuda.device_count() >= 1 else inputs
            pids = pids.to(device) if torch.cuda.device_count() >= 1 else pids
            tids = tids.to(device) if torch.cuda.device_count() >= 1 else tids

            # score = model(input_tensor)
            # print(inputs.shape)
            # raise "ssss"
    # else:
    # # idx = 0
            # print(sample['image'].shape)
            # print(type(sample['image']))
            # print(sample['image_path'])
            # Visualise only the second image of the batch.
            input_tensor = inputs[1,...].unsqueeze(0).to(device)
            # print(inputs.shape)
            # print(input_tensor.shape)
            # raise "54456"

            # if isinstance(sample['image'], torch.Tensor):
            # else:
            #     input_tensor = torch.from_numpy(sample['image']).unsqueeze(0).to(device)

            model.eval()
    # output = model(input_tensor)

            if preview_model:
                print(model)
                print('\n Please remove `--preview-model` to get the CAM.')
                return
            if not use_attention:
                target_layers = [model.PPVASFF.base.layer3[-1]]
            else:
            # target_layers = [model.PPVASFF.attentions[0]]
            # print(model.PPVASFF.base.layer4)
            # raise "ssss"
                if True:
                    # print(model.PPVASFF.base.blocks_token_only[0].gcnl_2l[0])
                    # raise "12123"
                    target_layers = [model.PPVASFF.base.blocks_token_only[0].gcnl_2l[0]]    # spatial_attens[1][-1]
                    cam = GradCAM(model=model, target_layers=target_layers, use_cuda=False)
                    # print(input_tensor.shape)
                    # raise "ss"
                    grayscale_cams = cam(input_tensor=input_tensor)    #  , targets=targets, target_category=None)

                    # NOTE(review): this loop index shadows the outer batch
                    # index `i` from the train_loader enumerate above.
                    for i in range(grayscale_cams.shape[0]):
                        grayscale_ = grayscale_cams[i, :]
                        # print(grayscale_.shape)
                        # raise "sss"
                    # for grayscale_cam in grayscale_cams:
                        # x = read_rgb_image(batch["image_path"][i])      #  changed
                        # x = read_rgb_image(sample["image_path"][i])
                        # grayscale_cam = grayscale_cam[0:, ]
                        cv_x = read_image(distill_map[0]) # [i]
                        # print(x)
                        # img_array = np.array(cv_x)
                        # print(img_array.shape)
                        # raise "4666"
                        # cv_img = cv2.resize(cv_img, tuple(self.size), cv2.INTER_LINEAR)
                        # cv_img = PIL.Image.fromarray(cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB))

                        sample = train_transforms(cv_x)
                        # print(sample)
                        # raise "33333"
                        # CHW -> HWC layout for show_cam_on_image.
                        sample = np.transpose(sample, [1, 2, 0])
                        # sample = np.float32(sample)
                        # Map the floats into the 0..255 range
                        # scaled_image_data = (sample - np.min(sample)) / (np.max(sample) - np.min(sample)) * 255
                        # Convert the values to an integer type
                        # int_image_data = np.uint8(scaled_image_data)
                        # rgb_img = int_image_data / 255.0
                        # print(int_image_data)
                        # print(rgb_img)
                        # raise "4"
                        rgb_img = np.float32(sample) / 255.0

                        # x = read_rgb_image(distill_map[0])
                        # print(x.shape)
                        # sample = val_transform(image=x)
                        # print(sample2)
                        # raise "6"
                        # print(sample['image'].shape)s
                        # raise "ss"
                        # rgb_img = sample['image'] / 255.0


                        visualization = show_cam_on_image(rgb_img, grayscale_, use_rgb=True)
                        plt.imshow(visualization)
                        if False:
                            plt.savefig('/home/ubuntu/yuyu/heat_map_picture/' + str(batch["image_path"][i][44:-4]) + 'backbone' + str('j') + '.png')
                        else:
                            plt.savefig('/home/ubuntu/yuyu/gitee_UFDN/UFDN-Reid/' + str('NoGCN_') + 'immmggg.png')
                        plt.show()
                        # Deliberate abort after the first image; raising a
                        # str is itself a TypeError, but it still stops the run.
                        raise "ss"
                else:
                    target_lls = []
                    for j in range(4):
                        target_layers = [model.PPVASFF.base.spatial_attens[j]]
                        target_lls.append(target_layers)
            # print([model.PPVASFF.gap])
            # print('=============================')
            # print([model.PPVASFF.base.layer4_2])
            # print(target_layers)  spatial_attens[-1]
            # raise "sss"
        # target_layers = [model.PPVASFF.hrcn_momo]

        # cam = GradCAM(model=model, target_layers=target_layers, use_cuda=False)
                    # LayerCAM over each of the four spatial-attention layers.
                    for j in range(4):
                        cam = LayerCAM(model=model, target_layers=target_lls[j], use_cuda=False)
        # print(input_tensor.shape)
        # targets = [ClassifierOutputTarget(576)]
                        grayscale_cam = cam(input_tensor=input_tensor)    #  , targets=targets, target_category=None)

                        for i in range(grayscale_cam.shape[0]):
                            grayscale_ = grayscale_cam[i, :]

                            # NOTE(review): `batch` is only defined in the
                            # disabled `if False:` branch above, so this line
                            # would raise NameError if this path ever ran.
                            x = read_rgb_image(batch["image_path"][i])      #  changed
                            # x = read_rgb_image(sample["image_path"][i])
                            sample = val_transform(image=x)
                            rgb_img = sample['image'] / 255.0

            # sample = val_transform(image=x)
            # rgb_img = sample['image'] / 255.0

                            visualization = show_cam_on_image(rgb_img, grayscale_, use_rgb=True)
                            plt.imshow(visualization)
                            if True:
                    # plt.savefig('./pic/' + str(idx + 1) + '_' + str(i + 1) + 'attention.png')
                # plt.savefig('./pic/' + str(idx + 1) + '_' + str(i + 1) + 'uafm.png')
                # plt.savefig('./glapic/' + str(idx + 1) + '_' + str(i + 1) + 'glo_loc.png')
                # plt.savefig('./805file/' + str(idx + 1) + '_' + str(i + 1) + '805photo.png')
                                plt.savefig('/home/ubuntu/yuyu/heat_map_picture/' + str(batch["image_path"][i][44:-4]) + 'Local_' + str(j) + '.png')
                            else:
                                plt.savefig('/home/ubuntu/yuyu/heat_map_picture/' + '0000688' + 'Local_' + str(j) + '.png')
                            plt.show()
                        # sample['image_path'] = ['/home/ubuntu/yuyu/UFDN-Reid/0303_c012_00063020_0.jpg']
                # raise "ss"


if __name__ == '__main__':
    # attention_model_path = 'E:/redetection/PVASFF_local/outputs/remote/model_141_attention.pth'
    # no_model_path = 'E:/redetection/PVASFF_local/outputs/remote/model_147_no_attention.pth'
    # Hard-coded checkpoint path; alternative checkpoints kept in comments.
    no_model_path = '/home/ubuntu/yuyu/pven6005/outputs/veri776_b64_pven/8340_no_vis_ibn120model.pth'  #8374_r1h_model110   8340_no_vis_ibn120model no_feat_ibn120model   8374_r1h_model110.pth  #8340_no_vis_ibn120model
    # no_model_path = '/home/ubuntu/yuyu/pven6005/outputs/veri776_b64_pven/8059_model110.pth'
    use_attention = True
    preview_model = False
    # BUG(review): run() is declared as run(cfg, model_path, use_attention,
    # preview_model, local_rank), so this three-argument call raises
    # TypeError. A cfg object (e.g. from make_config) and a local_rank must
    # be supplied — confirm the intended invocation.
    run(no_model_path, use_attention, preview_model)
