## Standard libraries
import os
import math
import time
import numpy as np
import argparse

## Imports for plotting
import matplotlib.pyplot as plt
from matplotlib.colors import to_rgb
import matplotlib

## Progress bar
from tqdm import tqdm

## PyTorch
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
import torch.optim as optim
# Torchvision
import torchvision
from torchvision.datasets import MNIST
from torchvision import transforms

from model import get_model, get_multiscale_flow
from torch.utils.tensorboard import SummaryWriter

import time
import datetime

def train(args):
	"""Train a multi-scale normalizing flow on MNIST and log progress.

	Args:
		args: argparse.Namespace with fields ``epoch``, ``batch_size``,
			``lr``, ``results_dir`` and ``data_path`` (see the __main__
			block at the bottom of this file).

	Side effects: writes TensorBoard scalars, periodic sample images and
	full-model checkpoints into ``args.results_dir``.
	"""

	# Fix every RNG we use so runs are reproducible.
	seed = 42
	torch.manual_seed(seed)
	torch.cuda.manual_seed(seed)
	torch.cuda.manual_seed_all(seed)
	np.random.seed(seed)

	# Ensure that all operations are deterministic on GPU (if used) for reproducibility
	torch.backends.cudnn.deterministic = True
	torch.backends.cudnn.benchmark = False

	# BUGFIX: the device was hard-coded to "cuda:1", which crashes on
	# CPU-only or single-GPU machines. Keep the original preference for
	# the second GPU but fall back gracefully.
	if torch.cuda.device_count() > 1:
		device = torch.device("cuda:1")
	elif torch.cuda.is_available():
		device = torch.device("cuda")
	else:
		device = torch.device("cpu")
	print("Using device", device)

	# Convert images from floats in [0, 1] to integers in [0, 255];
	# the flow models quantized pixel values.
	def discretize(sample):
		return (sample * 255).to(torch.int32)

	# Transformations applied on each image => make them a tensor and discretize
	transform = transforms.Compose([transforms.ToTensor(),
									discretize])

	# Loading the training dataset. We need to split it into a training and validation part
	train_dataset = MNIST(root=args.data_path, train=True, transform=transform, download=False)
	train_set, val_set = torch.utils.data.random_split(train_dataset, [50000, 10000])

	# Loading the test set
	test_set = MNIST(root=args.data_path, train=False, transform=transform, download=False)

	# BUGFIX: this loader drives the SGD loop below, so it must shuffle;
	# the original used shuffle=False, feeding a fixed order every epoch.
	train_loader = data.DataLoader(train_set, batch_size=args.batch_size, shuffle=True, drop_last=False)
	val_loader = data.DataLoader(val_set, batch_size=64, shuffle=False, drop_last=False, num_workers=4)
	test_loader = data.DataLoader(test_set, batch_size=64, shuffle=False, drop_last=False, num_workers=4)

	writer = SummaryWriter(args.results_dir)
	model = get_multiscale_flow(device=device)
	model = model.to(device)
	optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

	print(f"post_model parameters:{sum(p.numel() for p in model.parameters() if p.requires_grad)}")

	start_time = time.time()
	idx = 0  # global step counter across epochs
	for _ in range(args.epoch):
		for imgs, _ in tqdm(train_loader):  # labels are unused

			imgs = imgs.to(device)
			optimizer.zero_grad()
			# NOTE(review): assumes _get_likelihood returns the scalar
			# (negative) log-likelihood to minimise — confirm in model.py.
			loss = model._get_likelihood(imgs)
			loss.backward()
			optimizer.step()

			# Log a Python float, not the tensor: passing the tensor keeps
			# its autograd graph alive until TensorBoard releases it.
			writer.add_scalar("loss", loss.item(), idx)
			idx += 1

			if idx % 50 == 0:
				timesec = time.time() - start_time
				timesec = str(datetime.timedelta(seconds=int(timesec)))
				# BUGFIX: the kImg counter hard-coded 256 images per step;
				# use the actual configured batch size.
				print(f"kImg. : {idx*args.batch_size/1000:.2f}, time : {timesec} Curr. loss : {loss.item()}")
				# Sampling is inference only — skip gradient tracking.
				with torch.no_grad():
					sample = model.sample(img_shape=[16, 8, 7, 7])
				# os.path.join also works if results_dir lacks a trailing "/"
				torchvision.utils.save_image(sample / 255, os.path.join(args.results_dir, f"{idx}.png"))
			if idx % 500 == 0:
				torch.save(model, os.path.join(args.results_dir, f"{idx}.pth"))

if __name__ == '__main__':
	# Command-line interface: training hyper-parameters plus data location.
	cli = argparse.ArgumentParser()

	# optimisation settings
	cli.add_argument("--epoch", type=int, default=10000)
	cli.add_argument("--batch_size", type=int, default=512)
	cli.add_argument("--lr", type=float, default=2e-4)

	# output / input locations
	cli.add_argument("--results_dir", type=str, default='/home/baiweimin/yifei/flow-diff/results/tutorial/')
	cli.add_argument("--data_path", type=str, default='./data')

	train(cli.parse_args())