# -*- coding: utf-8 -*-
# @Time    : 2021/4/19
# @File    : train.py
import glob
import os
import random
import sys

from PIL import Image
from torch.optim import Adam
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.utils import save_image

from models import TransformerNet, VGG16
from tool.utils import *

# ---- training hyper-parameters ----
dataset_path = './dataset'  # root folder; ImageFolder expects one subdirectory per class
style_image = './images/style/starry.jpg'  # target style image
epoch_size = 1
batch_size = 1
image_size = (256, 256)  # content images are resized/cropped to this by train_transform
style_size = (256, 256)  # style image is resized to this by style_transform
alpha = 1e5   # content-loss weight
beta = 1e10   # style-loss weight
learning_rate = 1e-3
checkpoint_path = './checkpoints'
checkpoint_interval = 1000  # batches between checkpoint saves (0 disables)
sample_interval = 1000      # batches between sample-image dumps
# Dataset and dataloader are built at import time (module-level side effect).
# NOTE(review): no shuffle=True here, so training order is deterministic — confirm intended.
train_dataset = datasets.ImageFolder(dataset_path, train_transform(image_size))
dataloader = DataLoader(train_dataset, batch_size=batch_size)

# Number of BATCHES per epoch (len of the DataLoader), not the number of samples.
dataset_len = len(dataloader)

if __name__ == "__main__":
	# Derive the style name (e.g. "starry") from the style image filename.
	style_name = style_image.split("/")[-1].split(".")[0]
	os.makedirs(f"images/outputs/{style_name}-training", exist_ok=True)
	os.makedirs(f"checkpoints/{style_name}", exist_ok=True)

	# Train on GPU when one is available, otherwise fall back to CPU.
	device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

	# Networks: the image-transform net being trained, plus a frozen VGG16
	# used purely as a fixed feature extractor.
	transformer_net = TransformerNet().to(device)
	vgg = VGG16(requires_grad=False).to(device)

	# Optimizer for the transform net and the shared MSE criterion.
	optimizer = Adam(transformer_net.parameters(), learning_rate)
	l2_loss = torch.nn.MSELoss().to(device)

	# Load and preprocess the style image, then tile it across the batch.
	style = style_transform(style_size)(Image.open(style_image))
	style = style.repeat(batch_size, 1, 1, 1).to(device)

	# Precompute the style targets: one Gram matrix per VGG feature tap.
	style_features = vgg(style)
	style_gram_matrix = [gram_matrix(feat) for feat in style_features]

	# Eight randomly chosen training images, kept fixed for the whole run,
	# used to visually track model progress.
	sample_paths = random.sample(glob.glob(f"{dataset_path}/*/*.jpg"), 8)
	image_samples = torch.stack(
		[style_transform(image_size=image_size)(Image.open(p)) for p in sample_paths]
	)


	def save_sample_images(batches_done):
		"""Stylize the fixed sample batch and save an image grid.

		Originals are stacked above their stylized outputs (concatenated on
		the height axis) so progress is easy to eyeball. The net is switched
		to eval mode for inference and restored to train mode afterwards.
		"""
		transformer_net.eval()
		with torch.no_grad():
			stylized = transformer_net(image_samples.to(device))
		grid = denormalize(torch.cat((image_samples.cpu(), stylized.cpu()), 2))
		save_image(grid, f'images/outputs/{style_name}-training/{batches_done}.jpg', nrow=4)
		transformer_net.train()


	for epoch in range(epoch_size):
		# Running per-epoch loss history; means are reported in the log line.
		epoch_metrics = {"content": [], "style": [], "total": []}
		for batch_i, (images, _) in enumerate(dataloader):
			optimizer.zero_grad()

			images_original = images.to(device)
			images_transformed = transformer_net(images_original)

			# Extract VGG features for both the input and the stylized output.
			features_original = vgg(images_original)
			features_transformed = vgg(images_transformed)

			# Content loss: MSE between relu2_2 feature maps, weighted by alpha.
			content_loss = alpha * l2_loss(features_transformed.relu2_2, features_original.relu2_2)

			# Style loss: MSE between Gram matrices at every VGG tap, weighted
			# by beta. The precomputed style Gram target is sliced so a smaller
			# final batch still lines up with the output batch.
			style_loss = 0
			for ft_y, gm_s in zip(features_transformed, style_gram_matrix):
				gm_y = gram_matrix(ft_y)
				style_loss += l2_loss(gm_y, gm_s[: images.size(0), :, :])
			style_loss *= beta

			total_loss = content_loss + style_loss
			total_loss.backward()
			optimizer.step()

			epoch_metrics["content"] += [content_loss.item()]
			epoch_metrics["style"] += [style_loss.item()]
			epoch_metrics["total"] += [total_loss.item()]

			# Global 1-based batch counter across all epochs.
			batches_done = epoch * dataset_len + batch_i + 1

			if batches_done % sample_interval == 0:
				save_sample_images(batches_done)

			if checkpoint_interval > 0 and batches_done % checkpoint_interval == 0:
				# Save full training state so a run can be resumed.
				# (style_name is derived once at startup; the old code
				# redundantly recomputed it here with an identical result.)
				checkpoint = {
					'epoch': epoch,
					'model': transformer_net.state_dict(),
					'optimizer': optimizer.state_dict(),
				}
				torch.save(checkpoint, f"checkpoints/{style_name}/{style_name}{batches_done}.pth", _use_new_zipfile_serialization=True)

			# Log every 50 batches and on the final batch of the epoch.
			# BUGFIX: dataset_len is already len(dataloader) (a batch count), so
			# the old test `batch_i == dataset_len / batch_size` divided by the
			# batch size twice; even with batch_size=1 it could never fire
			# because batch_i tops out at dataset_len - 1.
			if batch_i % 50 == 0 or batch_i + 1 == dataset_len:
				sys.stdout.write("[\033[5;35mEpoch\033[0m %d/\033[1;33m%d\033[0m]\t"
								 "[\033[5;35mBatch\033[0m %d/\033[1;34m%d\033[0m]\t"
								 "[\033[5;35mContent\033[0m %10.2f/(\033[1;31m%10.2f\033[0m)]\t"
								 "[\033[5;35mStyle\033[0m %10.2f/(\033[1;33m%10.2f\033[0m)]\t"
								 "[\033[5;35mTotal\033[0m %10.2f/(\033[1;36m%10.2f\033[0m)]\n"
								 % (
									 epoch + 1,
									 epoch_size,
									 batch_i,
									 dataset_len,
									 content_loss.item(),
									 np.mean(epoch_metrics["content"]),
									 style_loss.item(),
									 np.mean(epoch_metrics["style"]),
									 total_loss.item(),
									 np.mean(epoch_metrics["total"]),
								 ))
