import argparse
import os
import random

import cv2
import numpy as np
import torch
import torch.nn.parallel
from torch.utils.data import DataLoader
from tqdm import tqdm
import torch.backends.cudnn as cudnn
from dense import DenseNet
from ACR.base.dataset1 import DynamicFARDataset
from ACR.base.parse_config import ConfigParser
from ACR.inpainting_metric import get_inpainting_metrics
from ACR.networks.generators import ACRModel
from MAE.util.misc import get_mae_model
from PIL import Image

def main_ir(gpu, args, config):
    """Build and load the infrared-branch models (MAE encoder + ACR generator).

    Args:
        gpu: CUDA device index to place both models on.
        args: parsed CLI namespace; uses ``load_pl``, ``mae_ckpt``, ``g_ckpt``
            and ``output_path``.
        config: ConfigParser providing ``g_args``, ``mask_decoder`` and, in the
            non-lightning path, the resume directory.

    Returns:
        Tuple ``(mae, acr)``, both on the given GPU, in eval mode, with
        gradients disabled.
    """
    # build models architecture, then print to console
    acr = ACRModel(config['g_args']).requires_grad_(False)
    acr.cuda(gpu).eval()
    mae = get_mae_model('mae_vit_large_patch16', mask_decoder=config['mask_decoder']).requires_grad_(False)
    mae.cuda(gpu).eval()
    # NOTE(review): `rank` is a module-level global set in the __main__ block;
    # calling this function from another module raises NameError.
    if rank == 0:
        print('MAE', sum(p.numel() for p in mae.parameters()))
        print('G', sum(p.numel() for p in acr.G.parameters()))
        print('GCs', sum(p.numel() for p in acr.GCs.parameters()))

    if args.load_pl:  # load ckpt from pytorch lightning
        # Fix: the log used to print args.resume while this hard-coded path was
        # actually loaded; print the path that is really used.
        ckpt_path = './construction_ir.ckpt'
        print("Loading checkpoint: {} ...".format(ckpt_path))
        checkpoint = torch.load(ckpt_path, map_location='cpu')['state_dict']
        mae_weights = {}
        g_weights = {}
        # Split the lightning state_dict into the two sub-model state_dicts.
        # Slice off the leading prefix only: str.replace() would also rewrite
        # 'mae.'/'acr.' occurring later inside a key.
        for k in checkpoint:
            if k.startswith('mae.'):
                mae_weights[k[len('mae.'):]] = checkpoint[k]
            if k.startswith('acr.'):
                g_weights[k[len('acr.'):]] = checkpoint[k]
        mae.load_state_dict(mae_weights)
        acr.load_state_dict(g_weights)
    else:
        print("Loading checkpoint: {} ...".format(args.mae_ckpt))
        checkpoint = torch.load(args.mae_ckpt, map_location='cpu')
        mae.load_state_dict(checkpoint['model'])
        if args.g_ckpt is None:
            args.g_ckpt = 'G_last.pth'
        resume_path = os.path.join(str(config.resume), args.g_ckpt)
        print("Loading checkpoint: {} ...".format(resume_path))
        checkpoint = torch.load(resume_path, map_location='cpu')
        acr.G.load_state_dict(checkpoint['generator'])
        acr.GCs.load_state_dict(checkpoint['gc_encoder'])

    eval_path = args.output_path
    os.makedirs(eval_path, exist_ok=True)
    return mae, acr

def main_vis(gpu, args, config):
    """Build and load the visible-branch models (MAE encoder + ACR generator).

    Args:
        gpu: CUDA device index to place both models on.
        args: parsed CLI namespace; uses ``load_pl``, ``mae_ckpt``, ``g_ckpt``
            and ``output_path``.
        config: ConfigParser providing ``g_args``, ``mask_decoder`` and, in the
            non-lightning path, the resume directory.

    Returns:
        Tuple ``(mae, acr)``, both on the given GPU, in eval mode, with
        gradients disabled.
    """
    # build models architecture, then print to console
    acr = ACRModel(config['g_args']).requires_grad_(False)
    acr.cuda(gpu).eval()
    mae = get_mae_model('mae_vit_large_patch16', mask_decoder=config['mask_decoder']).requires_grad_(False)
    mae.cuda(gpu).eval()
    # NOTE(review): `rank` is a module-level global set in the __main__ block;
    # calling this function from another module raises NameError.
    if rank == 0:
        print('MAE', sum(p.numel() for p in mae.parameters()))
        print('G', sum(p.numel() for p in acr.G.parameters()))
        print('GCs', sum(p.numel() for p in acr.GCs.parameters()))

    if args.load_pl:  # load ckpt from pytorch lightning
        # Fix: the log used to print args.resume while this hard-coded path was
        # actually loaded; print the path that is really used.
        ckpt_path = './construction_vis.ckpt'
        print("Loading checkpoint: {} ...".format(ckpt_path))
        checkpoint = torch.load(ckpt_path, map_location='cpu')['state_dict']
        mae_weights = {}
        g_weights = {}
        # Split the lightning state_dict into the two sub-model state_dicts.
        # Slice off the leading prefix only: str.replace() would also rewrite
        # 'mae.'/'acr.' occurring later inside a key.
        for k in checkpoint:
            if k.startswith('mae.'):
                mae_weights[k[len('mae.'):]] = checkpoint[k]
            if k.startswith('acr.'):
                g_weights[k[len('acr.'):]] = checkpoint[k]
        mae.load_state_dict(mae_weights)
        acr.load_state_dict(g_weights)
    else:
        print("Loading checkpoint: {} ...".format(args.mae_ckpt))
        checkpoint = torch.load(args.mae_ckpt, map_location='cpu')
        mae.load_state_dict(checkpoint['model'])
        if args.g_ckpt is None:
            args.g_ckpt = 'G_last.pth'
        resume_path = os.path.join(str(config.resume), args.g_ckpt)
        print("Loading checkpoint: {} ...".format(resume_path))
        checkpoint = torch.load(resume_path, map_location='cpu')
        acr.G.load_state_dict(checkpoint['generator'])
        acr.GCs.load_state_dict(checkpoint['gc_encoder'])

    eval_path = args.output_path
    os.makedirs(eval_path, exist_ok=True)
    return mae, acr

def merge(crops_tensor, positions, original_size):
    """Paste a batch of crops back into a zero-initialised full-size canvas.

    Args:
        crops_tensor: iterable/tensor of crops, each of shape (C, crop_h, crop_w);
            all crops must share `crops_tensor.device`.
        positions: iterable of (batch_idx, start_h, start_w) giving the
            top-left corner of the corresponding crop inside the canvas.
        original_size: (batch_size, channels, height, width) of the canvas.

    Returns:
        Float tensor of shape ``original_size`` with every crop written into
        its position; regions not covered by any crop remain zero.
    """
    batch_size, channels, original_h, original_w = original_size
    # Allocate directly on the target device instead of zeros(...).to(device),
    # which would allocate on CPU and then copy.
    reconstructed = torch.zeros(
        (batch_size, channels, original_h, original_w),
        device=crops_tensor.device)

    # Walk the crops and write each one back to its original position.
    for crop, (batch_idx, start_h, start_w) in zip(crops_tensor, positions):
        _, crop_h, crop_w = crop.size()
        reconstructed[batch_idx, :, start_h:start_h + crop_h, start_w:start_w + crop_w] = crop

    return reconstructed

def cover_vis(items, mae, acr):
    """Inpaint the visible branch of one sample.

    Extracts MAE features, runs the ACR generator, composites generated
    pixels into the masked region only, and merges the per-crop results
    back onto the full-resolution canvas.

    Returns:
        Tuple ``(image, mask)`` as HxWxC int numpy arrays (image in 0-255).
    """
    items['mae_feats'] = mae.forward_return_feature(
        items['img_256'], items['img1_256'],
        items['mask_256'], items['mask1_256'])

    # model inference: keep original pixels outside the mask,
    # generated pixels inside it, then scale to 0-255.
    generated = acr.forward(items)
    composited = items['image'] * (1 - items['mask']) + generated * items['mask']
    composited = torch.clamp(composited * 255.0, 0, 255)

    canvas_size = (1, 3, items['new_height'], items['new_width'])
    full_img = merge(composited, items['position'], canvas_size)
    full_mask = merge(items['mask'], items['position'], canvas_size)

    full_img = full_img.permute(0, 2, 3, 1).int().cpu().numpy()
    full_mask = full_mask.permute(0, 2, 3, 1).int().cpu().numpy()
    return full_img[0], full_mask[0]


def cover_ir(items, mae, acr):
    """Inpaint the infrared branch of one sample.

    Same pipeline as the visible branch: MAE features -> ACR generator ->
    mask-guided compositing -> merge crops back onto the full canvas.

    Returns:
        Tuple ``(image, mask)`` as HxWxC int numpy arrays (image in 0-255).
    """
    items['mae_feats'] = mae.forward_return_feature(
        items['img_256'], items['img1_256'],
        items['mask_256'], items['mask1_256'])

    # model inference: blend generated content into the masked area only.
    generated = acr.forward(items)
    blended = items['image'] * (1 - items['mask']) + generated * items['mask']
    blended = torch.clamp(blended * 255.0, 0, 255)

    canvas = (1, 3, items['new_height'], items['new_width'])
    merged_img = merge(blended, items['position'], canvas)
    merged_mask = merge(items['mask'], items['position'], canvas)

    merged_img = merged_img.permute(0, 2, 3, 1).int().cpu().numpy()
    merged_mask = merged_mask.permute(0, 2, 3, 1).int().cpu().numpy()
    return merged_img[0], merged_mask[0]

def fusion_model(ckpt_path=None):
    """Build the DenseNet fusion model and load its pretrained weights.

    Args:
        ckpt_path: path to the fusion checkpoint. Defaults to the
            module-level ``args.fusion_model`` for backward compatibility;
            pass it explicitly when calling from outside the __main__ block.

    Returns:
        DenseNet in eval mode with all checkpoint keys that match (after
        stripping the ``dense_net.`` prefix) loaded into it.
    """
    if ckpt_path is None:
        # NOTE(review): falls back to the global CLI namespace set in
        # __main__; raises NameError if called before args is parsed.
        ckpt_path = args.fusion_model
    dense = DenseNet()
    dense_dict = dense.state_dict()
    checkpoint_model = torch.load(ckpt_path, map_location='cpu')['model']
    # Keep only checkpoint entries whose prefix-stripped key exists in this
    # DenseNet. Slicing strips 'dense_net.' only at the start of the key;
    # str.replace() would also rewrite occurrences inside a key.
    pretrained_dict = {}
    for k, v in checkpoint_model.items():
        stripped = k[len('dense_net.'):] if k.startswith('dense_net.') else k
        if stripped in dense_dict:
            pretrained_dict[stripped] = v

    dense_dict.update(pretrained_dict)
    dense.load_state_dict(dense_dict)
    return dense.eval()

def change(items):
    """Swap the visible/infrared entries of ``items`` in place.

    Exchanges 'image'<->'image1', 'img_256'<->'img1_256',
    'mask'<->'mask1' and 'mask_256'<->'mask1_256', then returns the
    same (mutated) dict for chaining.
    """
    swap_pairs = (
        ('image', 'image1'),
        ('img_256', 'img1_256'),
        ('mask', 'mask1'),
        ('mask_256', 'mask1_256'),
    )
    for first, second in swap_pairs:
        items[first], items[second] = items[second], items[first]
    return items
if __name__ == '__main__':
    args = argparse.ArgumentParser(description='PyTorch Template')
    args.add_argument('-c', '--config', default='./configs/config_FAR_places21.yml', type=str,
                      help='config file path (default: None)')
    args.add_argument('-e', '--exp_name', default=None, type=str)
    args.add_argument('-r', '--resume', default='./construction_ir.ckpt', type=str,
                      help='path to latest checkpoint (default: None)')
    args.add_argument('--mae_ckpt', default=None, type=str)
    args.add_argument('--g_ckpt', default=None, type=str)
    args.add_argument('--output_path', type=str, default='./outputs3')
    # NOTE(review): string default — any non-empty value (even "False") is
    # truthy, so the pytorch-lightning load path is effectively always taken.
    args.add_argument('--load_pl', default='True')
    args.add_argument('--image_size', type=int, default=256, help='Test image size')
    args.add_argument('--fusion_model', default='./checkpoint_G-171.pth')

    # custom cli options to modify configuration from default values given in json file.
    args = args.parse_args()
    config = ConfigParser.from_args(args, mkdir=False)

    # Fix all RNG seeds for reproducible inference.
    SEED = 3407
    torch.manual_seed(SEED)
    torch.cuda.manual_seed_all(SEED)
    np.random.seed(SEED)
    random.seed(SEED)
    cudnn.benchmark = True
    args.world_size = 1

    rank = 0
    # Hard-coded to GPU 3; keep in sync with the `.to('cuda:3')` call below.
    torch.cuda.set_device(3)

    # dataloader
    data_args = config['dataset']
    data_args['rel_pos_num'] = config['g_args']['rel_pos_num']
    data_args['use_mpe'] = config['g_args']['use_mpe']
    data_args['default_size'] = args.image_size
    val_dataset = DynamicFARDataset(data_args, config['val_flist'], mask_path=None,
                                    batch_size=1, augment=False, training=False,
                                    test_mask_path=config['test_mask_flist'],
                                    test_mask_path1=config['test_mask_flist1'], world_size=1)
    val_loader = DataLoader(val_dataset, shuffle=False, pin_memory=True, batch_size=1, num_workers=0)
    num = 0

    mae_ir, acr_ir = main_ir(3, args, config)
    mae_vis, acr_vis = main_vis(3, args, config)
    model = fusion_model()

    # Create the result directories up front; Image.save fails otherwise.
    for sub in ('ir', 'vis', 'fusion'):
        os.makedirs(os.path.join('./msrs_result', sub), exist_ok=True)

    with torch.no_grad():
        for items in tqdm(val_loader):
            # Drop the DataLoader batch dimension and move every tensor to the
            # GPU. (The original branched on k != 'rel_pos' but both branches
            # were identical, so the conditional is removed.)
            for k in items:
                if type(items[k]) is torch.Tensor:
                    items[k] = items[k][0].to('cuda:3')
            num += 1
            print(num)
            # Inpaint the visible branch, swap the modalities in place, then
            # inpaint the infrared branch.
            vis_feature, vis_mask = cover_vis(items, mae_vis, acr_vis)
            items = change(items)
            ir_feature, ir_mask = cover_ir(items, mae_ir, acr_ir)

            vis_feature = np.clip(vis_feature, 0, 255)
            ir_feature = np.clip(ir_feature, 0, 255)
            fusion, _, _ = model(vis_feature, ir_feature)
            fusion = torch.einsum('bchw->bhwc', fusion).detach().numpy()[0]
            ir_feature = Image.fromarray(ir_feature.astype('uint8'))
            vis_feature = Image.fromarray(vis_feature.astype('uint8'))
            ir_feature.save('./msrs_result/ir/' + str(num) + '.jpg')
            vis_feature.save('./msrs_result/vis/' + str(num) + '.jpg')
            fusion = Image.fromarray((fusion * 255).clip(0, 255).astype('uint8'))
            fusion.save('./msrs_result/fusion/' + str(num) + '.jpg')








