import argparse
import os
from collections import OrderedDict
from copy import deepcopy

import imageio
from matplotlib import pyplot as plt
import numpy as np
from numpy.core.numeric import zeros_like
from numpy.lib.shape_base import expand_dims
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision.utils import save_image
from torchvision.transforms import functional as ttf
from PIL import Image
from tqdm import tqdm, trange
import seaborn as sns
from scipy.stats import entropy

from data.cocostuff_loader import *
from data.vg import *
# from utils.util import *
from data import get_dataset

from utils import *
from components import CategoryCausalGraph

# import legacy
from model import ResnetGenerator64, ResnetGenerator128


# def get_dataloader(dataset = 'coco', img_size=128, train=False):
#     dataset = get_dataset(dataset, img_size, left_right_flip=False, train=train)
#     dataloader = torch.utils.data.DataLoader(
#                     dataset, batch_size=1,
#                     drop_last=False, shuffle=False, num_workers=1)
#     return dataloader


def truncted_randn(*size, device=torch.device('cpu'), thres=2.0,):
    """Sample from an approximately truncated standard normal.

    Draws a standard normal tensor of the given shape and, wherever a value
    falls outside ``[-thres, thres]``, substitutes an independent second draw.
    Note this is a single resampling pass: the replacement values are not
    re-checked against the threshold.
    """
    primary = torch.randn(*size, device=device)
    fallback = torch.randn(*size, device=device)
    return torch.where(primary.abs() > thres, fallback, primary)


@torch.no_grad()
def main(args):
    """Generate images (and masks / per-object crops) from dataset layouts.

    Loads the generator, its EMA copy and the category causal-graph module
    from ``args.model_path``, iterates over the dataset layouts one at a
    time, and saves ``args.repeat`` generated samples per layout under
    ``args.sample_path``. Also saves the learned causal graph, per-sample
    semantic masks, optional per-category object crops, and prints mask
    entropy statistics and a Diversity Score.

    Args:
        args: parsed command-line namespace (see the argparse setup below).
    """
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    # parameters
    img_size = args.img_size
    assert img_size in [64, 128, 256, 512]

    # COCO-Stuff uses 184 categories (incl. background); Visual Genome uses 179.
    num_classes = 184 if 'coco' in args.dataset.lower() else 179

    dataset = get_dataset(args.dataset, img_size, left_right_flip=False, train=args.train)
    if 'coco' in args.dataset.lower():
        # Record the image-id order so downstream YOLO scoring can match
        # generated files back to COCO annotations.
        np.savetxt(args.image_id_savepath, np.array(dataset.image_ids).astype(int))
        print(f"Image ids of generated images in COCO are saved in {args.image_id_savepath}.")
    dataloader = torch.utils.data.DataLoader(
                    dataset, batch_size=1,
                    drop_last=False, shuffle=False, num_workers=0)

    device = torch.device("cuda") if int(args.gpu) >= 0 else torch.device("cpu")
    # Pick the generator class matching the requested resolution.
    netG = globals()[f'ResnetGenerator{img_size}'](num_classes=num_classes)
    emaG = deepcopy(netG)
    W = CategoryCausalGraph(num_classes)

    print(f'Use ResnetGenerator{img_size} in {args.model_path}')

    netG = load_model(netG, os.path.join(args.model_path, 'generator.pth'), device).eval().to(device)
    emaG = load_model(emaG, os.path.join(args.model_path, 'EMA_G.pth'), device).eval().to(device)
    W = load_model(W, os.path.join(args.model_path, 'W.pth'), device).eval().to(device)

    print("ema not update BN")

    repeat = args.repeat
    save_path = os.path.join(args.sample_path, f"{args.dataset}_{img_size}_repeat{repeat}")
    os.makedirs(save_path, exist_ok=True)

    print("Saved in {}".format(save_path))

    # Save the learned causal graph both as text and as an annotated heatmap.
    cg_np = W(None, None).detach().cpu().numpy()
    np.savetxt(os.path.join(save_path, 'cg.txt'), cg_np)
    print("Drawing Causal graphs.")
    fig, ax = plt.subplots()
    sns.heatmap(cg_np, cmap=None, annot=True, ax=ax)
    fig.savefig(os.path.join(save_path, 'causal_graph.png'), dpi=300,)

    img_save_path = os.path.join(save_path, "img")
    os.makedirs(img_save_path, exist_ok=True)
    crop_save_path = None

    if args.cropped_size > 0:
        # One sub-directory per category id for the per-object crops.
        crop_save_path = os.path.join(save_path, f"cropped_{args.cropped_size}")
        for idx in range(num_classes):
            os.makedirs(os.path.join(crop_save_path, str(idx)), exist_ok=True)

    mask_save_path = os.path.join(save_path, "masks")

    save_mask = True
    if save_mask:
        os.makedirs(mask_save_path, exist_ok=True)
        print(f"Will save masks in {mask_save_path}")
    raw_mask_entropy_list, mask_entropy_list = [], []

    DSflag = True  # args.DS
    DS = DSscore() if DSflag else None

    # Per-pixel mask scale factors (mask / raw_mask), split by direction.
    scale_up, scale_down = [], []

    colors = colormap(num_=num_classes, as_array=True)

    for idx, data in tqdm(enumerate(dataloader), total=len(dataloader)):
        try:
            real_image, label, bbox, _ = data
        except ValueError:
            # Some dataset variants yield 3-tuples without the extra field.
            real_image, label, bbox = data
        fake_image_list = []

        with torch.no_grad():
            # Replicate the single layout `repeat` times to sample it in one batch.
            batch_bboxes = bbox.float().expand(repeat, -1, -1).to(device)
            batch_labels = label.expand(repeat, -1).to(device)
            # Per-object and per-image latents from a truncated normal.
            z = truncted_randn(batch_bboxes.size(0), batch_bboxes.size(1), netG.z_dim, device=device)
            z_im = truncted_randn(batch_bboxes.size(0), netG.z_dim, device=device)
            fake_images, masks, raw_masks, adjusts = netG(
                W(batch_bboxes, batch_labels), z,
                batch_bboxes, z_im, batch_labels, return_mask=True)

            if args.cropped_size > 0:
                # Boxes are layout-invariant across the `repeat` copies: compute once.
                # Convert normalized xywh boxes to pixel-space xyxy for PIL cropping.
                bbox_rec = torchvision.ops.box_convert(bbox.clone(), "xywh", "xyxy").mul(img_size).to(torch.int)[0]

            for r in range(repeat):
                fake_image = fake_images[r:(r + 1)]
                mask = masks[r:(r + 1)]
                raw_mask = raw_masks[r:(r + 1)]
                fake_image_list.append(fake_image)
                # Generator outputs lie in [-1, 1]; rescale to [0, 1] for saving.
                save_image(fake_image[0] * 0.5 + 0.5, fp=os.path.join(img_save_path, f"sample_{idx}_{r}.png"))

                if save_mask:
                    # Average the scale factors > 1 and < 1 separately; first keep
                    # only the locations where the raw mask is non-zero.
                    scale_factor = mask.div(raw_mask + 1e-8).masked_select(raw_mask.gt(0.))
                    scale_up.append(scale_factor.masked_select(scale_factor.gt(1.)))
                    scale_down.append(scale_factor.masked_select(scale_factor.lt(1.)))

                    # Normalize masks over the object axis before drawing and
                    # computing the per-pixel entropy over objects.
                    raw_mask = raw_mask[0].div(raw_mask[0].sum(0, keepdim=True) + 1e-8)
                    draw_mask(raw_mask, label.squeeze().tolist(), colors).save(os.path.join(mask_save_path, f"raw_{idx}_{r}.png"))
                    raw_mask = raw_mask.cpu().numpy()
                    raw_mask[0] += 1e-8  # avoid all-zero columns before re-normalizing
                    raw_mask /= raw_mask.sum(0)
                    raw_mask_entropy = entropy(raw_mask.reshape(bbox.size(1), -1))
                    raw_mask_entropy_list.append(raw_mask_entropy.mean())

                    mask = mask[0].div(mask[0].sum(0, keepdim=True) + 1e-8)
                    draw_mask(mask, label.squeeze().tolist(), colors).save(os.path.join(mask_save_path, f"mask_{idx}_{r}.png"))
                    mask = mask.cpu().numpy()
                    mask[0] += 1e-8
                    mask /= mask.sum(0)
                    mask_entropy = entropy(mask.reshape(bbox.size(1), -1))
                    mask_entropy_list.append(mask_entropy.mean())

                if args.cropped_size > 0:
                    fake_pil = ttf.to_pil_image(fake_image[0].mul(.5).add(.5))
                    for idx1, (b, l) in enumerate(zip(bbox_rec.tolist(), label.flatten().tolist())):
                        if l > 0:  # label 0 is background: skip it
                            cropped = fake_pil.crop(b).resize((args.cropped_size, args.cropped_size), resample=Image.BICUBIC)
                            cropped.save(os.path.join(crop_save_path, str(l), f"crop_{idx}_{r}_{idx1}.png"))

            if DSflag and repeat > 1:
                # Diversity between consecutive samples of the same layout.
                for a, b in zip(fake_image_list[:-1], fake_image_list[1:]):
                    DS(a, b)

    if save_mask:
        raw_mask_entropy_array = np.array(raw_mask_entropy_list)
        mask_entropy_array = np.array(mask_entropy_list)
        print('raw mask entropy', raw_mask_entropy_array.mean(), '+-', raw_mask_entropy_array.std())
        print('mask entropy', mask_entropy_array.mean(), '+-', mask_entropy_array.std())

        scale_up_all = torch.cat(scale_up)
        scale_down_all = torch.cat(scale_down)
        print("scale_up_pixels {} +- {}".format(scale_up_all.mean(), scale_up_all.std()))
        print("scale_down_pixels {} +- {}".format(scale_down_all.mean(), scale_down_all.std()))

    if DSflag and save_mask:
        print("'Diversity_Score': '{}+-{}', ".format(*DS.mean_std()),
              "'raw_mask_entropy': '{}+-{}', ".format(raw_mask_entropy_array.mean(), raw_mask_entropy_array.std()),
              "'mask_entropy': '{}+-{}', ".format(mask_entropy_array.mean(), mask_entropy_array.std()))

        
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', type=int, default=0, help='random seed')
    parser.add_argument('--dataset', type=str, default='coco',
                        help='training dataset')
    parser.add_argument('--img_size', type=int, default=128, help='image size')
    parser.add_argument("--train", action="store_true", default=False,
                        help='whether to use the training set (default=False)')
    parser.add_argument('--model_path', type=str,
                        help='where to load models')
    parser.add_argument('--sample_path', type=str, default='tmp',
                        help='path to save generated images')
    parser.add_argument('-r', '--repeat', type=int, default=5,
                        help='the number of copies of a given layout')
    parser.add_argument('-C', '--cropped_size', type=int, default=224,
                        help='size of the per-object crops (0 disables cropping)')
    parser.add_argument('--image_id_savepath', type=str, default='image_id.txt',
                        help='the txt to record the image order for YOLO scores')
    parser.add_argument('--gpu', type=str, default='0',
                        help='which GPU to use')
    args = parser.parse_args()
    # Restrict CUDA to the requested device before any CUDA context is created.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    main(args)

# python test.py --dataset coco --model_path  -C --cropped_size 224

# python test.py --dataset coco -t 2.0 --model_path experiments/coco0727/model/G_200.pth --sample_path experiments/coco0727/samples/epoch_200/cropped_200/ --img_size 64 --gpu 4 

# crop generated images in size 128 to size 32
# python test.py --dataset coco --model_path experiments/coco0805/model/G_180.pth --sample_path experiments/coco0805/samples/epoch_180/cropped_32/ --img_size 128 --gpu 0 --cropped_size 32 -N -t 2.0 -r 5

# python test.py --dataset coco --model_path experiments/coco0805/model/G_180.pth --img_size 128 --gpu 0 -N -t 2.0 -r 1

# python test.py --dataset coco --model_path experiments/coco0916/model/G_200.pth --sample_path experiments/coco0916/samples/epoch_200/cropped_32/ --img_size 256 --gpu 0 --cropped_size 32 -N -t 2.0 -r 5

# python test.py --dataset coco --model_path experiments/coco_256/model/G_200.pth --sample_path tmp/ --img_size 256 --gpu 0 -t 2.0 -r 1


# python test.py --dataset coco --model_path experiments/coco_128/model/G_180.pth --sample_path tmp_figure/ --img_size 128 --gpu 7 -t 2.0 -r 5 --save_mask



# python test.py --dataset coco --model_path experiments/coco_256/model/G_200.pth --sample_path for_figure/whole/ --img_size 256 --gpu 1 -t 2.0 -r 5 --save_mask
# python test.py --dataset coco --model_path experiments/coco_128/model/G_180.pth --sample_path for_figure/whole/ --img_size 128 --gpu 1 -t 2.0 -r 5 --save_mask
# python test.py --dataset coco --model_path experiments/coco_64/model/G_200.pth --sample_path for_figure/whole/ --img_size 64 --gpu 1 -t 2.0 -r 5 --save_mask
# python test1.py --dataset coco --model_path experiments/coco_256/model/G_200.pth --sample_path for_figure/fix_z_img_0/ --img_size 256 --gpu 1 -t 2.0 -r 5 --save_mask --seed 0
# python test1.py --dataset coco --model_path experiments/coco_256/model/G_200.pth --sample_path for_figure/fix_z_img_1/ --img_size 256 --gpu 1 -t 2.0 -r 5 --save_mask --seed 1
# python test1.py --dataset coco --model_path experiments/coco_256/model/G_200.pth --sample_path for_figure/fix_z_img_2/ --img_size 256 --gpu 1 -t 2.0 -r 5 --save_mask --seed 2

# python test1.py --dataset coco --model_path experiments/coco_256/model/G_200.pth --sample_path for_figure/one_layout_one_z_img/ --img_size 256 --gpu 1 -t 2.0 -r 5 --save_mask

# python test1.py --dataset coco --model_path experiments/coco_256/model/G_200.pth --sample_path for_figure/one_layout_one_z_img_one_z_i/ --img_size 256 --gpu 1 -t 2.0 -r 5 --save_mask


# python test.py --dataset coco --model_path experiments/coco_256/model/G_200.pth --sample_path tmp --img_size 256 --gpu 1 -t 2.0 -r 5 -N

# python test.py --dataset coco --model_path experiments/coco_128/model/G_180.pth --sample_path tmp_jpg --img_size 128 --gpu 7 -t 2.0 -r 5 -N -C 224

# python test.py --dataset vg --model_path experiments/vg_64/model/G_200.pth --sample_path experiments/vg_64/samples/epoch_200/cropped_224/ --img_size 64 --gpu 0 --cropped_size 224 -N -t 2.0 -r 1

# python test.py --dataset vg --model_path experiments/vg_128/model/G_200.pth --sample_path experiments/vg_128/samples/epoch_200/cropped_224/ --img_size 128 --gpu 0 --cropped_size 224 -N -t 2.0 -r 1

# python test.py --dataset vg --model_path experiments/vg_256/model/G_200.pth --sample_path experiments/vg_256/samples/epoch_200/cropped_224/ --img_size 256 --gpu 0 --cropped_size 224 -N -t 2.0 -r 1

# python test.py --dataset vg --model_path experiments/vg_256/model/G_200.pth --sample_path experiments/vg_256/samples/epoch_200/ --img_size 256 --gpu 0 -t 2.0 -r 1