import numpy as np
import torch
import argparse
import model
import os
import imageio as io
from torch.utils.data import Subset
import degradations
import ast
import dataset
import utils
import torchvision

# Command-line interface for posterior sampling from a pretrained conditional flow.
parser = argparse.ArgumentParser()

# Checkpoint of the pretrained conditional (posterior) flow network.
parser.add_argument(
    "--post_resume_from",
    type=str,
    default='/home/baiweimin/yifei/flow-diff/results/AFHQ/gt/016--CondConvINN2-2-4-4-4-lr0.0001-bits0-GaussianNoise/cond_network_028672.pt',
)

# Dataset / degradation configuration. Dict-valued options are parsed with
# ast.literal_eval so they can be passed as Python literals on the CLI.
parser.add_argument("--data_type", type=str, default='AFHQDataset')
parser.add_argument("--data_args", type=ast.literal_eval, default={'power_of_two': True})
parser.add_argument("--degradation_type", type=str, default='GaussianNoise')
parser.add_argument("--degradation_args", type=ast.literal_eval, default={'mean': 0., 'std': 0.1})
parser.add_argument("--num_bits", type=int, default=0)

# Model configuration.
parser.add_argument("--input_shape", type=int, nargs='+', default=[3, 64, 64])
parser.add_argument("--post_model_type", type=str, default='CondConvINN2')
parser.add_argument(
    "--post_model_args",
    type=ast.literal_eval,
    default={'num_conv_layers': [2, 4, 4], 'num_fc_layers': [4], 'cond_layer_thicknesses': [32, 64, 64, 256]},
)
parser.add_argument("--post_actnorm", type=lambda b: bool(int(b)), help="0 or 1")

def generate(args):
    """Draw posterior samples from a pretrained conditional flow and save them as images.

    Builds the degradation and the (ambient) train/test datasets from ``args``,
    loads the conditional flow checkpoint at ``args.post_resume_from``, samples
    64 posterior draws for each of the first 4 test observations, and writes
    sample grids, single samples, per-observation sample means, and the
    ground-truth images under ``save_folder``.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed CLI options (see the module-level parser). ``args.data_args``
        is mutated in place to carry ``input_shape``.
    """
    save_folder = "/home/baiweimin/yifei/flow-diff/results/AFHQ/post/"
    # FIX: create the output directory up front; the save calls below would
    # otherwise fail with FileNotFoundError on a fresh machine.
    os.makedirs(save_folder, exist_ok=True)
    device = f'cuda:{0}'
    print("device", device)
    torch.manual_seed(0)  # deterministic sampling across runs

    args.data_args['input_shape'] = args.input_shape
    degradation = getattr(degradations, args.degradation_type)(
        **args.degradation_args, input_shape=args.input_shape, num_bits=args.num_bits)
    train_dataset = getattr(dataset, args.data_type)(train=True,  ambient=True, degradation=degradation, **args.data_args)
    test_dataset  = getattr(dataset, args.data_type)(train=False, ambient=True, degradation=degradation, **args.data_args)
    test_dataset_clean  = getattr(dataset, args.data_type)(train=False, **args.data_args)

    post_model = getattr(model, args.post_model_type)(args.input_shape, cond_shape=args.input_shape, device=device, **args.post_model_args)
    # NOTE(review): if loading fails, change args.input_shape to degradation.output_shape
    post_model.load_state_dict(torch.load(args.post_resume_from), strict=False)
    post_model.to(device)
    print("pretrained flow loaded!")

    if args.data_type == "CIFARDataset":
        # Restrict every split to samples whose target label is 5.
        # FIX: the original overwrote `test_indices` with the clean dataset's
        # indices and used those for BOTH test subsets; each dataset now
        # filters by its own targets.
        train_indices = [idx for idx, target in enumerate(train_dataset.targets) if target == 5]
        test_indices = [idx for idx, target in enumerate(test_dataset.targets) if target == 5]
        test_indices_clean = [idx for idx, target in enumerate(test_dataset_clean.targets) if target == 5]
        train_dataset = Subset(train_dataset, train_indices)
        test_dataset  = Subset(test_dataset, test_indices)
        test_dataset_clean  = Subset(test_dataset_clean, test_indices_clean)

    # First 4 test observations; item[0] is the degraded observation y,
    # item[1] the ground truth x (presumably in [-0.5, 0.5] given the +0.5
    # shifts below — TODO confirm against the dataset class).
    y_tests = [test_dataset[i][0] for i in range(4)]

    xsamps = []
    for i, yt in enumerate(y_tests):
        print(yt.shape)
        yt = yt.reshape(1, *yt.shape).to(device)
        # Conditioning is an all-zeros tensor shaped like the observation.
        con = torch.zeros_like(yt)
        x_samp = post_model.sample(64, con, temp=1)
        xsamps.append(x_samp)
        utils.save_images(x_samp, f'{save_folder}/post_sample{i}.png', imrange=[-0.5, 0.5])
        # x_samp appears to be array-like (wrapped in torch.tensor before saving);
        # shift by +0.5 to map the [-0.5, 0.5] range into [0, 1] for save_image.
        torchvision.utils.save_image(torch.tensor(x_samp[0]) + 0.5, f'{save_folder}/post_sample_single{i}.png')
        torchvision.utils.save_image(torch.mean(torch.tensor(x_samp), 0, True) + 0.5, f'{save_folder}/mean_post_sample{i}.png')

    # Save the ground-truth images alongside the samples.
    gt = [test_dataset[i][1].detach() for i in range(4)]
    for i, yt in enumerate(gt):
        torchvision.utils.save_image(yt + 0.5, f'{save_folder}/gt{i}.png')


if __name__ == '__main__':
    # Parse CLI options and run posterior sampling.
    generate(parser.parse_args())