import os
from random import sample
import sys
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from vehicle_reid_pytorch.data import make_basic_dataset, demo_transforms
# from model import ParsingReidModel
from main import make_config, build_model

from vehicle_reid_pytorch.utils import load_checkpoint, save_checkpoint, merge_configs, get_host_ip, read_rgb_image
from pytorch_grad_cam import GradCAM, ScoreCAM, GradCAMPlusPlus, AblationCAM, XGradCAM, EigenCAM
from pytorch_grad_cam.utils.image import show_cam_on_image
import matplotlib.pyplot as plt


class MY_PVASFF(torch.nn.Module):
    """Adapter that lets pytorch-grad-cam drive the two-input PVEN model.

    Grad-CAM only feeds an image tensor to ``forward``; the parsing mask the
    underlying model also needs is stashed on ``self.batch`` by the caller
    before the CAM pass.
    """

    def __init__(self, cfg, classes):
        """Build the wrapped re-id model.

        Args:
            cfg: project config object (as returned by ``make_config``).
            classes: number of identity classes for the classifier head.
        """
        super(MY_PVASFF, self).__init__()
        # Fix: the original hard-coded 576 here and silently ignored
        # ``classes``; the visible caller passes 576, so behavior is kept.
        self.PPVASFF = build_model(cfg, classes).to('cuda')
        # Must be set by the caller to a dict containing a 'mask' tensor
        # (assumed (C, H, W); unsqueeze below adds the batch dim — TODO confirm).
        self.batch = None

    def forward(self, image):
        """Run the wrapped model and return only the classification scores,
        which is the tensor Grad-CAM backpropagates through."""
        mask = self.batch['mask'].unsqueeze(0).to('cuda:0')
        output = self.PPVASFF(image, mask)
        return output['cls_score']


def run(model_path, use_attention, preview_model):
    """Generate and save a Grad-CAM heat map for one hard-coded VeRi image.

    Args:
        model_path: checkpoint path loadable with ``torch.load``.
        use_attention: if True, target the attention branch
            (``uafm.conv_out``) instead of the backbone's ``layer3[-1]``.
        preview_model: if True, just print the model architecture and return.
    """
    import cv2
    import numpy as np

    cfg = make_config()
    device = 'cuda:0'
    cfg.freeze()

    # Build the transforms once (the original constructed val_transform twice
    # through two aliases of the same module).
    val_transform = demo_transforms.get_validation_augmentations((256, 256))
    preprocessing = demo_transforms.get_preprocessing()

    # One hand-picked gallery image plus its parsing mask.
    sample = {
        'filename': ['0027_c017_00012205_0.jpg'],
        'image_path': ['/home/ubuntu/yuyu/datasets/VeRi/image_test/0602_c019_00022775_1.jpg'],
        'id': '27',
        'cam': '017',
        'mask_path': '/home/ubuntu/yuyu/datasets/veri776_masks/gallery/0602_c019_00022775_1.png',
        'image': '',
    }
    sample["image"] = read_rgb_image(sample['image_path'][0])

    # Parsing mask -> one binary channel per part label 0..4, float32 (H, W, 5).
    mask = cv2.imread(sample["mask_path"], cv2.IMREAD_GRAYSCALE)
    mask = np.stack([mask == v for v in range(5)], axis=-1).astype('float32')
    sample["mask"] = mask

    # Data augmentation, then preprocessing (resize / normalize / to-tensor).
    sample = val_transform(**sample)
    sample = preprocessing(**sample)

    model = MY_PVASFF(cfg, 576)
    model.batch = sample
    state_dict = torch.load(model_path, map_location=cfg.test.device)
    # strict=False: the checkpoint may lack heads not needed for CAM.
    model.PPVASFF.load_state_dict(state_dict, strict=False)

    idx = 0
    input_tensor = sample['image'].unsqueeze(0).to(device)
    model.eval()
    model(input_tensor)

    if preview_model:
        print(model)
        print('\n Please remove `--preview-model` to get the CAM.')
        return

    if not use_attention:
        target_layers = [model.PPVASFF.base.layer3[-1]]
    else:
        target_layers = [model.PPVASFF.uafm.conv_out]

    cam = GradCAM(model=model, target_layers=target_layers, use_cuda=False)
    grayscale_cam = cam(input_tensor=input_tensor)

    for i in range(grayscale_cam.shape[0]):
        grayscale_ = grayscale_cam[i, :]
        # Fix: re-load the raw image into a *separate* dict instead of
        # overwriting ``sample`` — the original clobbered the dict whose
        # 'image_path' this loop reads, breaking any iteration past the first.
        raw = read_rgb_image(sample["image_path"][i])
        vis_sample = val_transform(image=raw)
        rgb_img = vis_sample['image'] / 255.0
        # Fix: removed debug leftovers (`print(type(...))` and `raise "ss"`)
        # that made everything below unreachable; raising a str is itself a
        # TypeError in Python 3.
        visualization = show_cam_on_image(rgb_img, grayscale_, use_rgb=True)
        plt.imshow(visualization)
        if use_attention:
            plt.savefig('./newnewpp/' + str(idx + 1) + '_' + str(i + 1) + 'ssssssss0027_c017_00012205_0.png')
        else:
            plt.savefig('./stage_map/' + str(idx + 1) + '_' + str(i + 1) + 'no_attention.png')
        plt.show()


if __name__ == '__main__':
    # Checkpoint to visualize; swap the path to inspect a different run.
    checkpoint = '/home/ubuntu/yuyu/pven6005/outputs/veri776_b64_pven/8340_no_vis_ibn120model.pth'
    run(checkpoint, use_attention=True, preview_model=False)
