"""
 Neural style transfer (Leon A. Gatys' method). Transfers the style of a reference image onto a target
 image using gradient descent.

 .. _references: [A Neural Algorithm of Artistic Style](http://arxiv.org/abs/1508.06576),
                 https://github.com/keras-team/keras-io/blob/master/examples/generative/neural_style_transfer.py
                 https://courses.d2l.ai/zh-v2/assets/notebooks/chapter_computer-vision/neural-style.slides.html#/
"""

import time
import torch
import numpy as np
import torchvision
from torch import nn
from utils import *
from torch.utils.tensorboard import SummaryWriter
from torchvision.transforms import functional as F


def gram_matrix(x):
    """Compute the Gram matrix of a feature tensor (used for the style loss).

    The tensor is flattened to ``(channels, pixels)``; the Gram matrix is the
    channel-by-channel inner product, normalized by the total number of
    feature entries.
    """
    channels = x.shape[1]
    pixels = x.numel() // channels
    features = x.reshape((channels, pixels))
    return (features @ features.T) / (channels * pixels)


def extract_features(image, network, content_layers, style_layers):
    """Run the image through `network` layer by layer, collecting the outputs
    of the layers whose indices appear in `content_layers` / `style_layers`.
    """
    content_features = []
    style_features = []
    for layer_idx, layer in enumerate(network):
        image = layer(image)
        if layer_idx in style_layers:
            style_features.append(image)
        if layer_idx in content_layers:
            content_features.append(image)
    return content_features, style_features


def get_content_data(image, image_shape, network, content_layers, device=None):
    """Preprocess the content image and extract its content-layer features.

    Returns the preprocessed image tensor (moved to `device`) together with
    the list of content features produced by `network`.
    """
    tensor = preprocess_image(image, image_shape).to(device)
    features, _ = extract_features(tensor, network, content_layers, [])
    return tensor, features


def get_style_data(image, image_shape, network, style_layers, device=None):
    """Preprocess the style image and extract its style features.

    Returns the preprocessed image tensor (moved to `device`) together with
    the Gram matrix of every style-layer feature map, so the expensive style
    targets are computed only once before training.
    """
    tensor = preprocess_image(image, image_shape).to(device)
    _, features = extract_features(tensor, network, [], style_layers)
    grams = [gram_matrix(feature) for feature in features]
    return tensor, grams


def content_loss(y_hat, y):
    """Content loss: mean squared error between the generated image's
    features and the content image's features.

    `y` is detached so gradients only flow through `y_hat` (the target
    content features are constants during optimization).
    """
    diff = y_hat - y.detach()
    return (diff ** 2).mean()


def style_loss(y_hat, gram_y, channels, image_nrows, image_ncols):
    """Style loss: mean squared error between the Gram matrix of the
    generated image's features and the (precomputed, detached) Gram matrix
    of the style image's features. Gram matrices capture texture statistics,
    so matching them transfers the reference image's style.

    `channels`, `image_nrows` and `image_ncols` are unused here (the Gram
    matrices are already normalized inside `gram_matrix`); they are kept to
    preserve the call signature.
    """
    diff = gram_matrix(y_hat) - gram_y.detach()
    return (diff ** 2).mean()


def total_variation_loss(y_hat):
    """Total-variation regularizer, keeping the generated image locally
    coherent: the mean absolute difference between neighboring pixels along
    each spatial axis, averaged over both directions.
    """
    vertical = (y_hat[:, :, 1:, :] - y_hat[:, :, :-1, :]).abs().mean()
    horizontal = (y_hat[:, :, :, 1:] - y_hat[:, :, :, :-1]).abs().mean()
    return 0.5 * (vertical + horizontal)


def compute_loss(x, content_y_hat, content_y, content_weight, style_y_hat, style_gram_y, style_weight, tv_weight,
                 channels, image_nrows, image_ncols):
    """Compute the weighted content, style and total-variation losses and
    sum them into a single scalar for backpropagation.

    NOTE: replicating the style-loss list 10x before summing applies an
    extra hard-coded 10x factor to the style term (on top of `style_weight`);
    this mirrors the original recipe's weighting.
    """
    content_image_loss = [
        content_weight * content_loss(y_hat, y)
        for y_hat, y in zip(content_y_hat, content_y)
    ]
    style_image_loss = [
        style_weight * style_loss(y_hat, y_gram, channels, image_nrows, image_ncols)
        for y_hat, y_gram in zip(style_y_hat, style_gram_y)
    ]
    t_variation_loss = tv_weight * total_variation_loss(x)
    final_loss = sum(style_image_loss * 10 + content_image_loss + [t_variation_loss])
    return content_image_loss, style_image_loss, t_variation_loss, final_loss


class SynthesizedImage(nn.Module):
    """Wraps the generated image as a trainable parameter so that a standard
    optimizer can update its pixels directly via gradient descent."""

    def __init__(self, image_shape, init_image=None):
        super().__init__()
        # Start from random noise; overwritten when a starting image is given.
        self.weight = nn.Parameter(torch.rand(*image_shape))
        if init_image is not None:
            self.weight.data.copy_(init_image.data)

    def forward(self):
        # The module's "output" is simply the image tensor itself.
        return self.weight


def get_inits(image_shape, lr, init_image=None, device=None):
    """Build the trainable synthesized image and its Adam optimizer.

    Returns the image tensor (the module's parameter, so gradients reach it)
    and the optimizer that updates it.
    """
    module = SynthesizedImage(image_shape, init_image).to(device)
    adam = torch.optim.Adam(module.parameters(), lr=lr)
    return module(), adam


def train(content_y, style_gram_y, num_epochs, lr, lr_decay_epoch, channels, image_nrows, image_ncols, network,
          content_layers, style_layers, content_weight, style_weight, tv_weight, begin_epoch=0, init_image=None,
          device=None, step=10, work_name=None):
    """The training loop. Repeatedly run vanilla gradient descent steps to minimize the loss, and save the
    resulting image every "step" iterations.

    Args:
        content_y (Tensor): A pre-processed content image tensor.
        style_gram_y (Tensor): Gram matrix tensor of a style image.
        num_epochs (int): The total number of iterations of training.
        lr (float): The learning rate of gradient descent algorithm.
        lr_decay_epoch (int): Period of learning rate decay.
        channels (int): Number of channels for the image, default is 3.
        image_nrows (int): The height of the image.
        image_ncols (int): The width of the image.
        network (nn.Module): Pretraining network for image feature extraction.
        content_layers (sequence): Content feature layer in feature extraction pretraining network.
        style_layers (sequence): Style feature layer in pre-training network of feature extraction.
        content_weight (float): Weight of the content loss function.
        style_weight (float): Weight of style loss function.
        tv_weight (float): Weight of style total variation loss function.
        begin_epoch: Continue the beginning of training epoch.
        init_image (Tensor): Generate the initial weight of the image, the value of the initial pixel point.
        device (device): Style transfer training equipment.
        step (int): Cycle of saving training results.
        work_name (str): Saving the resulting image directory name.
    """
    begin = time.time()
    work_name, output_dir = get_output_config(work_name)
    generated_image, optimizer = get_inits((1, channels, image_nrows, image_ncols), lr, init_image, device)
    # NOTE(review): when resuming with begin_epoch > 0 the scheduler is not
    # fast-forwarded, so the learning-rate schedule restarts from `lr`.
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, lr_decay_epoch, 0.8)
    writer = SummaryWriter(comment=work_name)

    for epoch in range(begin_epoch, num_epochs):
        print(f"epoch {epoch + 1}")
        optimizer.zero_grad()
        content_y_hat, style_y_hat = extract_features(generated_image, network, content_layers, style_layers)
        content_image_loss, style_image_loss, t_variation_loss, final_loss = compute_loss(
            generated_image, content_y_hat, content_y, content_weight, style_y_hat, style_gram_y, style_weight,
            tv_weight, channels, image_nrows, image_ncols)

        final_loss.backward()
        optimizer.step()
        scheduler.step()
        if (epoch + 1) % step == 0:
            # Bug fix: the TV-loss curve previously re-logged the style loss
            # (sum(style_image_loss)) instead of the total-variation loss.
            writer.add_scalars('loss', {'contents loss': float(sum(content_image_loss)),
                                        'styles loss': float(sum(style_image_loss)),
                                        'total variation loss': float(t_variation_loss)}, epoch)
            image = postprocess_image(generated_image)
            writer.add_image("output image", F.to_tensor(image), epoch)
            image.save(f"{output_dir}/{epoch}.jpg")

    end = time.time()
    print(f"Training completed, total time: {end - begin} sec.")
