import argparse
import logging
import ast
import sys
import os
import time
import datetime

import torch
import torch.optim
import numpy as np

import model
import dataset
import utils
import degradations

from torch.utils.tensorboard import SummaryWriter


def train(args):
    """Train the posterior flow model described by ``args``.

    Builds the degradation operator, train/test datasets, and posterior model
    from the type names in ``args`` (looked up via ``getattr`` on the project
    modules), then runs a training loop with linear LR warmup, TensorBoard
    logging every step, console logging every 50 steps, and sampling plus
    checkpointing every 500 steps.

    Args:
        args: parsed ``argparse.Namespace`` — see the CLI definition in
            ``__main__`` for the full list of fields.
    """
    device = f'cuda:{args.gpu}'
    torch.manual_seed(0)  # fixed seed for reproducible runs

    def print0(*args, **kwargs):
        # Rank-0-style print shim (single-process here, so it just prints).
        print(*args, **kwargs)

    print(args.batch_size)
    args.data_args['input_shape'] = args.input_shape
    degradation = getattr(degradations, args.degradation_type)(**args.degradation_args, input_shape=args.input_shape, num_bits=args.num_bits)
    train_dataset = getattr(dataset, args.data_type)(train=True, ambient=True, degradation=degradation, gt=args.gt, inpainting=args.inpainting,  **args.data_args)
    test_dataset  = getattr(dataset, args.data_type)(train=False, ambient=True, degradation=degradation, gt=args.gt, inpainting=args.inpainting, **args.data_args)
    train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=4)
    test_dataloader  = torch.utils.data.DataLoader(test_dataset,  batch_size=args.batch_size, shuffle=True, num_workers=4)

    post_model = getattr(model, args.post_model_type)(args.input_shape, device=device, **args.post_model_args)
    # Hoisted out of the training loop: nn.Module.to() is in-place and
    # idempotent, so moving the model once up front is equivalent to the
    # per-iteration call and avoids redundant work.
    post_model = post_model.to(device)

    save_folder = utils.setup_saver(args.results_dir,  f'-{post_model.identifier}' + f'-lr{args.lr_post}' + f'-bits{args.num_bits}-{args.degradation_type}')
    # Context manager so the config file handle is closed (was leaked before).
    with open(f'{save_folder}/config.txt', 'w') as config_file:
        print0(args.__dict__, file=config_file)
    sys.stdout = utils.Logger(save_folder+'/log.txt')  # tee stdout into log.txt

    print0(f"post_model parameters:{sum(p.numel() for p in post_model.parameters() if p.requires_grad)}")

    post_optimizer = torch.optim.Adam(post_model.trainable_parameters, lr=args.lr_post)

    print0(args, flush=True)
    writer = SummaryWriter(save_folder)

    # Save a fixed panel of noisy test observations for visual reference.
    noisies = [test_dataset[i][0].detach().numpy() for i in range(16)]
    utils.save_images(noisies, f'{save_folder}/noisies.png', imrange=[-0.5, 0.5])

    print("Training posterior model")

    y_tests = [test_dataset[i][0] for i in range(4)]
    start_time = time.time()
    idx = 0
    dloader = iter(train_dataloader)
    while True:
        # Cycle the dataloader indefinitely; restart it when exhausted.
        try:
            y, x = next(dloader)
        except StopIteration:
            dloader = iter(train_dataloader)
            y, x = next(dloader)

        y = y.to(device)
        x = x.to(device)
        post_optimizer.zero_grad()
        # MNIST trains the flow on the noisy observation y; other datasets
        # train on the clean sample x.
        if args.data_type == "MNISTDataset":
            z, log_jac_det = post_model(y)
        else:
            z, log_jac_det = post_model(x)
        # Flow negative log-likelihood per dimension: 0.5*mean(z^2) for the
        # standard-normal prior, minus the mean log-determinant normalized by
        # the number of input dimensions.
        loss = 0.5*torch.nn.functional.mse_loss(z, torch.zeros_like(z)) - torch.mean(log_jac_det) / np.prod(args.input_shape)
        writer.add_scalar('loss', loss, idx)
        writer.add_scalar('jac', torch.mean(log_jac_det) / np.prod(args.input_shape), idx)
        writer.add_scalar('z', 0.5*torch.nn.functional.mse_loss(z, torch.zeros_like(z)), idx)
        loss.backward()

        # Linear LR warmup over the first 10000 * 10 = 100k images.
        warmup_lr = args.lr_post * min(1, idx * args.batch_size / (10000 * 10))
        post_optimizer.param_groups[0]["lr"] = warmup_lr
        post_optimizer.step()

        if idx % 50 == 0:
            timesec = time.time() - start_time
            timesec = str(datetime.timedelta(seconds=int(timesec)))
            print0(f"kImg. : {idx*args.batch_size/1000:.2f}, time : {timesec} Curr. loss : {loss}")
        if idx % 500 == 0:
            xsamps = []
            # NOTE(review): `yt` is reshaped and moved to the device but never
            # passed to `sample`, so these draws are unconditional — confirm
            # whether conditioning on y_tests was intended.
            for yt in y_tests:
                yt = yt.reshape(1, *yt.shape).to(device)
                x_samp = post_model.sample(4, temp=1)
                xsamps.append(x_samp)
            xsamps = np.concatenate(xsamps, axis=0)
            utils.save_images(xsamps, f'{save_folder}/fakes_{(idx*args.batch_size//1000):06}.png', imrange=[-0.5,0.5])
            post_model.save(f'{save_folder}/network_{(idx*args.batch_size//1000):06}.pt')
            torch.save(post_model.state_dict(), f'{save_folder}/cond_network_{(idx*args.batch_size//1000):06}.pt')

        idx += 1
        if idx >= args.num_iters_post:
            break

def _build_parser():
    """Construct the CLI argument parser for posterior-flow training."""
    parser = argparse.ArgumentParser()

    parser.add_argument("--log_path", type=str, default='/home/baiweimin/yifei/flow-diff/log/flow')

    # Multiprocessing arguments
    parser.add_argument('-n', '--nodes', default=1, type=int, metavar='N')
    parser.add_argument('-g', '--gpus', default=1, type=int, help='number of gpus per node')
    parser.add_argument('-nr', '--nr', default=0, type=int, help='ranking within the nodes')
    parser.add_argument('--gpu', default=1, type=int, help='gpu to operate on')

    # Training arguments
    parser.add_argument("--num_iters", type=int, default=10000)
    parser.add_argument("--num_iters_post", type=int, default=10000)
    parser.add_argument("--num_iters_main", type=int, default=10000)
    parser.add_argument("--batch_size", type=int, default=8)
    parser.add_argument("--lr_post", type=float, default=1e-06)
    parser.add_argument("--num_z", type=int, default=10)
    parser.add_argument("--reg_parameter", type=float, default=1e-03)
    parser.add_argument("--results_dir", type=str, default='/home/baiweimin/yifei/flow-diff/results/flow')
    parser.add_argument("--resume_from", type=str, default='')
    parser.add_argument("--reg_tv", type=float, default=0)
    parser.add_argument("--gt", action='store_true')
    parser.add_argument("--no_con", action='store_true')

    # Model arguments (dict-valued options are parsed with ast.literal_eval)
    parser.add_argument("--input_shape", type=int, nargs='+', default=[3, 32, 32])
    parser.add_argument("--post_model_type", type=str, default='CondConvINN')
    parser.add_argument("--post_model_args", type=ast.literal_eval, default={'num_conv_layers':[4, 12], 'num_fc_layers':[4]})
    parser.add_argument("--post_actnorm", type=lambda b:bool(int(b)), help="0 or 1")

    # Data arguments
    parser.add_argument("--data_type", type=str, default='MNISTDataset')
    parser.add_argument("--data_args", type=ast.literal_eval, default={'power_of_two': True})
    parser.add_argument("--degradation_type", type=str, default='GaussianNoise')
    parser.add_argument("--degradation_args", type=ast.literal_eval, default={'mean':0., 'std':0.3})
    parser.add_argument("--inpainting", action='store_true')
    parser.add_argument("--num_bits", type=int, default=0)

    return parser


if __name__ == "__main__":
    train(_build_parser().parse_args())