# -*- coding: utf-8 -*-
# @Time    : 2021/4/19
# @File    : train.py
import glob
import os
import random
import sys

from PIL import Image
from torch.optim import Adam
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.utils import save_image

from models import TransformerNet, VGG16
from utils import *

# Training hyper-parameters and file paths, collected in one place.
args = dict(
	dataset_path='./dataset',
	style_image='./images/style/C.jpg',
	epochs=1,
	batch_size=1,  # CUDA memory only allows a batch size of 1
	image_size=(256, 340),
	style_size=(256, 340),
	lambda_content=1e5,   # weight of the content (feature) loss
	lambda_style=1e10,    # weight of the style (Gram-matrix) loss
	learning_rate=1e-3,
	checkpoint_model=None,        # path of a checkpoint to resume from, or None
	checkpoint_interval=2000,     # save weights every N batches
	sample_interval=1000,         # save sample images every N batches
)

# Build the training DataLoader over an ImageFolder-style dataset tree.
train_dataset = datasets.ImageFolder(
	args['dataset_path'],
	train_transform(args['image_size']),
)
dataloader = DataLoader(train_dataset, batch_size=args['batch_size'])

# Number of batches per epoch; used below for progress reporting.
dataset_len = len(dataloader)

if __name__ == "__main__":
	# Derive the style name from the style image filename (e.g. "C" from
	# "C.jpg"). os.path.basename is portable and matches how the checkpoint
	# filenames are built further below.
	style_name = os.path.basename(args['style_image']).split(".")[0]
	os.makedirs(f"images/outputs/{style_name}-training", exist_ok=True)
	os.makedirs("checkpoints", exist_ok=True)

	device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

	# Define networks: the transformation net that is trained, and a frozen
	# VGG16 used only as a feature extractor for the perceptual losses.
	transformer = TransformerNet().to(device)
	vgg = VGG16(requires_grad=False).to(device)

	# Optionally resume from a checkpoint. map_location makes a GPU-saved
	# checkpoint loadable on a CPU-only machine (the original call had no
	# map_location and would crash in that situation).
	if args['checkpoint_model']:
		transformer.load_state_dict(torch.load(args['checkpoint_model'], map_location=device))

	# Define optimizer and loss
	optimizer = Adam(transformer.parameters(), args['learning_rate'])
	l2_loss = torch.nn.MSELoss().to(device)

	# Load the style image and replicate it across the batch dimension so it
	# can be compared element-wise with a batch of transformed images.
	style = style_transform(args['style_size'])(Image.open(args['style_image']))
	style = style.repeat(args['batch_size'], 1, 1, 1).to(device)

	# Extract style features once; their Gram matrices are constant targets
	# for the whole training run.
	features_style = vgg(style)
	gram_style = [gram_matrix(y) for y in features_style]

	# Sample 8 images for visual evaluation of the model
	image_samples = []
	for path in random.sample(glob.glob(f"{args['dataset_path']}/*/*.jpg"), 8):
		image_samples += [style_transform(image_size=args['image_size'])(Image.open(path))]
	image_samples = torch.stack(image_samples)

	def save_sample(batches_done):
		"""Run the transformer on the fixed sample batch and save a comparison grid."""
		transformer.eval()
		with torch.no_grad():
			styled = transformer(image_samples.to(device))
		# Stack each original above its stylized version, then undo the
		# input normalization before writing the grid to disk.
		grid = torch.cat((image_samples.cpu(), styled.cpu()), 2)
		save_image(denormalize(grid), f"images/outputs/{style_name}-training/{batches_done}.jpg", nrow=4)
		transformer.train()


	# Main training loop: one perceptual-loss update per batch.
	for epoch in range(args['epochs']):
		epoch_metrics = {"content": [], "style": [], "total": []}
		for batch_i, (images, _) in enumerate(dataloader):
			optimizer.zero_grad()

			images_original = images.to(device)
			images_transformed = transformer(images_original)

			# Extract features
			features_original = vgg(images_original)
			features_transformed = vgg(images_transformed)

			# Compute content loss as MSE between features
			content_loss = args['lambda_content'] * l2_loss(features_transformed.relu2_2, features_original.relu2_2)

			# Compute style loss as MSE between gram matrices. gram_style is
			# sliced to images.size(0) so a smaller final batch still matches.
			style_loss = 0
			for ft_y, gm_s in zip(features_transformed, gram_style):
				gm_y = gram_matrix(ft_y)
				style_loss += l2_loss(gm_y, gm_s[: images.size(0), :, :])
			style_loss *= args['lambda_style']

			total_loss = content_loss + style_loss
			total_loss.backward()
			optimizer.step()

			epoch_metrics["content"] += [content_loss.item()]
			epoch_metrics["style"] += [style_loss.item()]
			epoch_metrics["total"] += [total_loss.item()]

			# Print progress every 10 batches and on the last batch of the
			# epoch. Bug fix: enumerate() yields 0..dataset_len-1, so the
			# original `batch_i == dataset_len` comparison was never true.
			if batch_i % 10 == 0 or batch_i == dataset_len - 1:
				sys.stdout.write("\r"
					"[\033[5;35mEpoch\033[0m %d/\033[1;33m%d\033[0m]\t"
					"[\033[5;35mBatch\033[0m %d/\033[1;34m%d\033[0m]\t"
					"[\033[5;35mContent\033[0m %10.2f/(\033[1;31m%10.2f\033[0m)]\t"
					"[\033[5;35mStyle\033[0m %10.2f/(\033[1;33m%10.2f\033[0m)]\t"
					"[\033[5;35mTotal\033[0m %10.2f/(\033[1;36m%10.2f\033[0m)]"
					% (
						epoch + 1,
						args['epochs'],
						batch_i,
						dataset_len,
						content_loss.item(),
						np.mean(epoch_metrics["content"]),
						style_loss.item(),
						np.mean(epoch_metrics["style"]),
						total_loss.item(),
						np.mean(epoch_metrics["total"]),
					)
				)
				# The "\r" line carries no newline, so force it to the
				# terminal; line-buffered stdout would otherwise hold it back.
				sys.stdout.flush()

			batches_done = epoch * dataset_len + batch_i + 1
			if batches_done % args['sample_interval'] == 0:
				save_sample(batches_done)

			# Periodically checkpoint the transformer weights. style_name is
			# already derived from the style image filename above; the
			# original redundantly recomputed the identical value here.
			if args['checkpoint_interval'] > 0 and batches_done % args['checkpoint_interval'] == 0:
				torch.save(transformer.state_dict(), f"checkpoints/{style_name}_{batches_done}.pth")