import argparse
import logging
import ast
import sys
import os
import time
import datetime

import torch
import torch.optim
import numpy as np
import torch.nn as nn
from math import log

import realnvp
import dataset
import utils
import degradations

import torchvision
from torch.utils.tensorboard import SummaryWriter

def calc_loss(log_p, logdet, input_shape, n_bins):
	"""Convert flow log-likelihood terms into bits-per-dimension.

	Returns a tuple ``(loss, log_p_bpd, logdet_bpd)``: ``loss`` is the mean
	negative log-likelihood in bits per pixel (including the dequantization
	offset of ``-log(n_bins)`` per pixel); the other two entries are the
	prior and Jacobian terms rescaled by the same factor.
	"""
	n_pixel = input_shape[0] * input_shape[1] * input_shape[2]
	bits_per_image = log(2) * n_pixel

	# Total log-likelihood = dequantization constant + prior + Jacobian.
	total = log_p + logdet - log(n_bins) * n_pixel

	nll_bpd = (-total / bits_per_image).mean()
	log_p_bpd = (log_p / bits_per_image).mean()
	logdet_bpd = (logdet / bits_per_image).mean()
	return (nll_bpd, log_p_bpd, logdet_bpd)

def train(args):
	"""Train the posterior flow model described by ``args``.

	Instantiates the degradation, dataset and model classes named in
	``args`` via ``getattr`` on the project modules, then runs
	``args.num_iters_post`` optimizer steps, logging scalars to
	TensorBoard and periodically saving sample grids and checkpoints
	under a folder created inside ``args.results_dir``.

	NOTE(review): relies on the project-local modules ``realnvp``,
	``dataset``, ``utils`` and ``degradations``; their interfaces are
	assumed from the call sites below — confirm against those modules.
	"""
	device = f'cuda:{args.gpu}'
	torch.manual_seed(0)  # fixed seed for reproducible runs

	n_bins = 2**args.num_bits
	ndim = args.input_shape[0]*args.input_shape[1]*args.input_shape[2]

	args.data_args['input_shape'] = args.input_shape
	degradation = getattr(degradations, args.degradation_type)(**args.degradation_args, input_shape=args.input_shape, num_bits=args.num_bits)
	train_dataset = getattr(dataset, args.data_type)(train=True, ambient=True, degradation=degradation, gt=args.gt, inpainting=args.inpainting, num_bits=args.num_bits, **args.data_args)
	test_dataset = getattr(dataset, args.data_type)(train=False, ambient=True, degradation=degradation, gt=args.gt, inpainting=args.inpainting, num_bits=args.num_bits, **args.data_args)
	train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=4)

	post_model = getattr(realnvp, args.post_model_type)(ndim=ndim, n_flow=3).to(device)

	save_folder = utils.setup_saver(args.results_dir,  'glow' + f'-lr{args.lr_post}' + f'-bits{args.num_bits}-{args.degradation_type}')
	# FIX: write the config through a context manager instead of leaking the
	# handle from print(..., file=open(...)).
	with open(f'{save_folder}/config.txt', 'w') as cfg_file:
		print(args.__dict__, file=cfg_file)
	sys.stdout = utils.Logger(save_folder+'/log.txt')

	print(f"post_model parameters:{sum(p.numel() for p in post_model.parameters() if p.requires_grad)}")

	post_optimizer = torch.optim.Adam(post_model.parameters(), lr=args.lr_post)

	print(args, flush=True)
	writer = SummaryWriter(save_folder)

	# Save a grid of noisy observations for visual reference.
	noisies = [test_dataset[i][0].detach().numpy() for i in range(16)]
	utils.save_images(noisies, f'{save_folder}/noisies.png', imrange=[-0.5, 0.5])

	print("Training posterior model")

	start_time = time.time()
	idx = 0
	# Per-scale latent shapes, discovered on the first forward pass.
	z_shapes = None
	dloader = iter(train_dataloader)
	while True:
		# Cycle the dataloader forever: the iteration budget, not epochs,
		# controls training length.
		try:
			y, x = next(dloader)
		except StopIteration:
			dloader = iter(train_dataloader)
			y, x = next(dloader)

		y = y.to(device)
		x = x.to(device)

		post_optimizer.zero_grad()
		# FIX: the old data_type if/else had byte-identical branches; a
		# single forward call is equivalent.
		log_p_sum, logdet, z_outs = post_model(y)
		if z_shapes is None:
			# FIX: z_shapes was never defined, so the sampling branch below
			# raised NameError on the very first iteration (idx == 0 hits
			# idx % 500 == 0). Derive the shapes from the model's own latents.
			z_shapes = [z.shape[1:] for z in z_outs]

		logdet = logdet.mean()
		# FIX: the bits-per-dim NLL from calc_loss used to be overwritten by
		# (mean(log_p) - mean(logdet)) / n_pixel, whose sign is wrong for
		# maximum-likelihood training (minimizing it pushes log_p DOWN).
		# Train on the standard NLL instead.
		loss, log_p, log_det = calc_loss(log_p_sum, logdet, args.input_shape, n_bins=n_bins)
		writer.add_scalar('loss', loss, idx)
		writer.add_scalar('logdet', log_det, idx)
		writer.add_scalar('log_p', log_p, idx)

		loss.backward()
		nn.utils.clip_grad_norm_(post_model.parameters(), max_norm=1.0)
		post_optimizer.step()

		if idx % 50 == 0:
			timesec = time.time() - start_time
			timesec = str(datetime.timedelta(seconds=int(timesec)))
			print(f"kImg. : {idx*args.batch_size/1000:.2f}, time : {timesec} Curr. loss : {loss}")
			# Input-range sanity check (was printed every step; now periodic
			# to keep the log readable).
			print(f"max: {torch.max(y)}")
			print(f"min: {torch.min(y)}")
		if idx % 500 == 0:
			# Draw unconditional samples from the prior and checkpoint.
			z_sample = [torch.randn(4, *z).to(device) for z in z_shapes]
			img_samp, _ = post_model.reverse(z_sample)
			torchvision.utils.save_image(img_samp+0.5, f'{save_folder}/fakes_{(idx*args.batch_size//1000):06}.png')
			torch.save(post_model.state_dict(), f'{save_folder}/cond_network_{(idx*args.batch_size//1000):06}.pt')

		idx += 1
		if idx >= args.num_iters_post:
			break

if __name__ == "__main__":

	# Command-line interface: every knob of train() is exposed as a flag.
	cli = argparse.ArgumentParser()

	cli.add_argument("--log_path", type=str, default='/home/baiweimin/yifei/flow-diff/log/flow')

	# Multi-node / device-selection flags.
	cli.add_argument('-n', '--nodes', default=1, type=int, metavar='N')
	cli.add_argument('-g', '--gpus', default=1, type=int, help='number of gpus per node')
	cli.add_argument('-nr', '--nr', default=0, type=int, help='ranking within the nodes')
	cli.add_argument('--gpu', default=1, type=int, help='gpu to operate on')

	# Optimization schedule and regularization.
	cli.add_argument("--num_iters", type=int, default=10000)
	cli.add_argument("--num_iters_post", type=int, default=10000)
	cli.add_argument("--num_iters_main", type=int, default=10000)
	cli.add_argument("--batch_size", type=int, default=8)
	cli.add_argument("--lr_post", type=float, default=1e-06)
	cli.add_argument("--num_z", type=int, default=10)
	cli.add_argument("--reg_parameter", type=float, default=1e-03)
	cli.add_argument("--results_dir", type=str, default='/home/baiweimin/yifei/flow-diff/results/flow')
	cli.add_argument("--resume_from", type=str, default='')
	cli.add_argument("--reg_tv", type=float, default=0)
	cli.add_argument("--gt", action='store_true')
	cli.add_argument("--no_con", action='store_true')

	# Model architecture; dict-valued flags are parsed with ast.literal_eval.
	cli.add_argument("--input_shape", type=int, nargs='+', default=[3, 32, 32])
	cli.add_argument("--post_model_type", type=str, default='CondConvINN')
	cli.add_argument("--post_model_args", type=ast.literal_eval, default={'num_conv_layers':[4, 12], 'num_fc_layers':[4]})
	cli.add_argument("--post_actnorm", type=lambda flag: bool(int(flag)), help="0 or 1")

	# Dataset and degradation configuration.
	cli.add_argument("--data_type", type=str, default='MNISTDataset')
	cli.add_argument("--data_args", type=ast.literal_eval, default={'power_of_two': True})
	cli.add_argument("--degradation_type", type=str, default='GaussianNoise')
	cli.add_argument("--degradation_args", type=ast.literal_eval, default={'mean':0., 'std':0.3})
	cli.add_argument("--inpainting", action='store_true')
	cli.add_argument("--num_bits", type=int, default=0)

	train(cli.parse_args())