import argparse
import logging
import ast
import sys
import os
import time
import datetime

import torch
import torch.optim
import numpy as np

import flow_model
import degradations
import dataset_tool
import utils


def train(args):
    """Train a conditional posterior flow model on (possibly degraded) data.

    Args:
        args: argparse.Namespace with the fields declared by this script's
            CLI (model/data/degradation configs, batch size, learning rate,
            iteration count, output directory, ...).

    Side effects: creates a run folder under ``args.results_dir``, redirects
    ``sys.stdout`` into a log file inside it, and periodically writes sample
    image grids and network checkpoints there.
    """
    device = f'cuda:{args.gpu}'
    torch.manual_seed(0)  # fixed seed for reproducible runs

    # Placeholder for rank-0-only printing in a future multi-process setup;
    # currently just forwards to print(). Parameters renamed so the outer
    # `args` Namespace is not shadowed.
    def print0(*values, **kwargs):
        print(*values, **kwargs)

    args.data_args['input_shape'] = args.input_shape

    # Optional degradation operator (e.g. GaussianNoise) applied inside the datasets.
    if args.degradation_type != "None":
        degradation = getattr(degradations, args.degradation_type)(
            **args.degradation_args, input_shape=args.input_shape, num_bits=args.num_bits)
    else:
        degradation = None

    train_dataset = getattr(dataset_tool, args.data_type)(
        train=True, ambient=True, degradation=degradation,
        gt=args.gt, inpainting=args.inpainting, **args.data_args)
    test_dataset = getattr(dataset_tool, args.data_type)(
        train=False, ambient=True, degradation=degradation,
        gt=args.gt, inpainting=args.inpainting, **args.data_args)
    # Clean (non-degraded) test split, used only to save reference images.
    test_dataset_clean = getattr(dataset_tool, args.data_type)(train=False, **args.data_args)
    train_dataloader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=4)
    test_dataloader = torch.utils.data.DataLoader(
        test_dataset, batch_size=args.batch_size, shuffle=True, num_workers=4)

    post_model = getattr(flow_model, args.post_model_type)(
        args.input_shape, cond_shape=args.input_shape, device=device, **args.post_model_args)
    # Hoisted out of the training loop: .to(device) is a no-op after the first call.
    post_model = post_model.to(device)

    print(f"post_model parameters:{sum(p.numel() for p in post_model.parameters() if p.requires_grad)}")

    post_optimizer = torch.optim.Adam(post_model.trainable_parameters, lr=args.lr_post)

    save_folder = utils.setup_saver(
        args.results_dir,
        f'-{post_model.identifier}' + f'-lr{args.lr_post}' + f'-bits{args.num_bits}-{args.degradation_type}')
    # Dump the full config; a context manager ensures the handle is closed
    # (the original leaked an open file here).
    with open(f'{save_folder}/config.txt', 'w') as cfg_file:
        print0(args.__dict__, file=cfg_file)
    sys.stdout = utils.Logger(save_folder + '/log.txt')  # tee stdout into log.txt
    print0(args, flush=True)

    # Save reference grids: clean test images and their degraded counterparts.
    noisies = [train_dataset[i][0].detach().numpy() for i in range(16)]
    utils.save_images(np.stack([test_dataset_clean[i][0].detach().numpy() for i in range(16)]),
                      f'{save_folder}/reals.png', imrange=[-0.5, 0.5])
    utils.save_images(noisies, f'{save_folder}/noisies.png', imrange=[-0.5, 0.5])

    print("Training posterior model")

    def total_variation_loss(x):
        """Anisotropic (L1) total variation over the spatial dims of an NCHW batch."""
        # Absolute differences along height and width.
        dx = torch.abs(x[:, :, 1:, :] - x[:, :, :-1, :])
        dy = torch.abs(x[:, :, :, 1:] - x[:, :, :, :-1])
        return torch.sum(dx) + torch.sum(dy)

    y_tests = [test_dataset[i][0] for i in range(4)]  # fixed conditions for periodic sampling
    start_time = time.time()
    idx = 0
    dloader = iter(train_dataloader)
    while True:
        # Cycle the dataloader indefinitely; restart it when exhausted.
        try:
            y, _, gt_x, mask = next(dloader)
        except StopIteration:
            dloader = iter(train_dataloader)
            y, _, gt_x, mask = next(dloader)

        y = y.to(device)
        gt_x = gt_x.to(device)
        post_optimizer.zero_grad()

        # Pass gt_x / mask only when the corresponding mode is enabled —
        # this collapses the original four-way branch while passing exactly
        # the same keyword arguments in every configuration.
        loss_kwargs = {'num_z': args.num_z,
                       'reg_parameter': args.reg_parameter,
                       'num_bits': args.num_bits}
        if args.gt:
            loss_kwargs['gt_x'] = gt_x
        if args.inpainting:
            loss_kwargs['mask'] = mask
        loss, x_posts = post_model.get_loss(y, degradation, **loss_kwargs)

        # Optional TV regularization on the posterior samples.
        # Out-of-place add: `loss +=` would be an in-place op on an autograd tensor.
        loss = loss + args.reg_tv * total_variation_loss(x_posts)
        loss.backward()

        # Linear LR warmup over the first ~100k training images.
        warmup_lr = args.lr_post * min(1, idx * args.batch_size / (10000 * 10))
        post_optimizer.param_groups[0]["lr"] = warmup_lr
        post_optimizer.step()

        if idx % 50 == 0:
            timesec = time.time() - start_time
            timesec = str(datetime.timedelta(seconds=int(timesec)))
            print0(f"kImg. : {idx*args.batch_size/1000:.2f}, time : {timesec} Curr. loss : {loss}")
        if idx % 500 == 0:
            # Sample 4 posteriors for each of the 4 held-out conditions, then checkpoint.
            xsamps = []
            for yt in y_tests:
                yt = yt.reshape(1, *yt.shape).to(device)
                xsamps.append(post_model.sample(4, yt, temp=1))
            xsamps = np.concatenate(xsamps, axis=0)
            kimg = idx * args.batch_size // 1000
            utils.save_images(xsamps, f'{save_folder}/fakes_{kimg:06}.png', imrange=[-0.5, 0.5])
            post_model.save(f'{save_folder}/network_{kimg:06}.pt')
            torch.save(post_model.state_dict(), f'{save_folder}/cond_network_{kimg:06}.pt')

        idx += 1
        if idx >= args.num_iters_post:
            break

if __name__ == "__main__":
    # Command-line interface; every flag keeps its original name, type and default.
    cli = argparse.ArgumentParser()

    cli.add_argument("--log_path", type=str, default='/home/baiweimin/yifei/flow-diff/log/flow')

    # Multiprocessing arguments
    cli.add_argument('-n', '--nodes', default=1, type=int, metavar='N')
    cli.add_argument('-g', '--gpus', default=1, type=int, help='number of gpus per node')
    cli.add_argument('-nr', '--nr', default=0, type=int, help='ranking within the nodes')
    cli.add_argument('--gpu', default=1, type=int, help='gpu to operate on')

    # Training arguments
    cli.add_argument("--num_iters", type=int, default=10000)
    cli.add_argument("--num_iters_post", type=int, default=10000)
    cli.add_argument("--num_iters_main", type=int, default=10000)
    cli.add_argument("--batch_size", type=int, default=8)
    cli.add_argument("--lr_post", type=float, default=1e-06)
    cli.add_argument("--num_z", type=int, default=10)
    cli.add_argument("--reg_parameter", type=float, default=1e-03)
    cli.add_argument("--results_dir", type=str, default='/home/baiweimin/yifei/flow-diff/results/flow')
    cli.add_argument("--resume_from", type=str, default='')
    cli.add_argument("--reg_tv", type=float, default=0)
    cli.add_argument("--gt", action='store_true')

    # Model arguments (dict-valued flags are parsed with ast.literal_eval)
    cli.add_argument("--input_shape", type=int, nargs='+', default=[3, 32, 32])
    cli.add_argument("--post_model_type", type=str, default='CondConvINN')
    cli.add_argument("--post_model_args", type=ast.literal_eval,
                     default={'num_conv_layers': [4, 12], 'num_fc_layers': [4]})
    cli.add_argument("--post_actnorm", type=lambda s: bool(int(s)), help="0 or 1")

    # Data arguments
    cli.add_argument("--data_type", type=str, default='MNISTDataset')
    cli.add_argument("--data_args", type=ast.literal_eval, default={'power_of_two': True})
    cli.add_argument("--degradation_type", type=str, default='GaussianNoise')
    cli.add_argument("--degradation_args", type=ast.literal_eval, default={'mean': 0., 'std': 0.3})
    cli.add_argument("--inpainting", action='store_true')
    cli.add_argument("--num_bits", type=int, default=0)

    train(cli.parse_args())