import os
from typing import Dict, Any

import numpy as np
import torch
import torchvision

from torch import nn, Tensor, float32
from torchvision import transforms as T
from torchvision.models import VGG
from torch.nn import functional as F, Parameter

from PIL import Image
from tqdm import tqdm

# Resolve the compute device once at import time; tensors and the model are
# moved to it explicitly further down.
HAS_CUDA = torch.cuda.is_available()
device = torch.device("cuda" if HAS_CUDA else "cpu")


class transferer(nn.Module):
    """Pretrained VGG-19 wrapper for Gatys-style neural style transfer.

    Forward hooks capture the activations of selected ``vgg.features``
    layers; those activations drive the content loss (last hooked layer)
    and the style loss (Gram matrices of all hooked layers).
    """
    _layer_output_dict: Dict[Any, Any]  # layer index -> hooked activation of the last forward pass
    _vgg: VGG

    def __init__(self, layers=(3, 9, 18, 27, 36), w_list=None):
        """
        :param layers: indices into ``vgg.features`` whose outputs are hooked.
        :param w_list: per-layer style weights; defaults to a uniform 0.2.
        """
        super().__init__()
        self._vgg = torchvision.models.vgg19(pretrained=True)
        self._vgg.eval()
        # VGG is a fixed feature extractor here: freeze its weights so that
        # backward passes stop accumulating useless gradients into them.
        for param in self._vgg.parameters():
            param.requires_grad_(False)
        self.layers = layers
        self.w_list = w_list if w_list else {layer: 0.2 for layer in layers}
        self._layer_output_dict = {}
        # hook up for the outputs in layers of vgg
        self._hook_up(layers)

    def make_style_content(self, style_img=None, content_img=None):
        """Run style/content images through VGG once and cache the hooked
        activations as optimization targets.

        Targets are detached: they are constants of the optimization, and
        keeping their graphs alive would force ``retain_graph=True`` on every
        backward pass in the caller.
        """
        self._vgg(style_img)
        self.si_vgg_output = {k: v.detach() for k, v in self._layer_output_dict.items()}
        self._layer_output_dict = {}
        self._vgg(content_img)
        self.ci_vgg_output = {k: v.detach() for k, v in self._layer_output_dict.items()}
        self._layer_output_dict = {}

    def forward(self, img: Tensor):
        """Return {layer index: activation} for the hooked layers on ``img``."""
        self._vgg(img)
        img_vgg_output = self._layer_output_dict
        self._layer_output_dict = {}
        return img_vgg_output

    def _get_hook(self, layer):
        # Closure binding `layer` so each hook files its output under its own key.
        def hook(model, input, output):
            self._layer_output_dict[layer] = output

        return hook

    def _hook_up(self, layers):
        """Register a forward hook on each requested ``vgg.features`` layer."""
        for layer in layers:
            assert isinstance(self._vgg.features[layer], nn.Module)
            self._vgg.features[layer].register_forward_hook(self._get_hook(layer))

    def _style_loss(self, img_vgg_output: dict, si_vgg_output: dict):
        """Gatys et al. style loss: weighted MSE between Gram matrices,
        normalized by 1/(2*N*M)^2 with N = channels and M = H*W per layer."""
        layers_style_losses = []
        for layer in img_vgg_output:
            img_layer_output = img_vgg_output[layer]
            si_layer_output = si_vgg_output[layer]
            shape = img_layer_output.shape  # (batch=1, C, H, W)
            img_style_representation = cal_style_representation(img_layer_output.reshape(shape[-3:]))
            si_style_representation = cal_style_representation(si_layer_output.reshape(shape[-3:]))
            layer_style_loss = F.mse_loss(img_style_representation, si_style_representation)
            # 1/(2*C*H*W)^2; the original used shape[0]*shape[1]*shape[2] on
            # the 4-D shape (= 1*C*H), dropping the width factor from the
            # paper's normalization.
            channels, height, width = shape[-3], shape[-2], shape[-1]
            layer_style_loss *= self.w_list[layer] / (2 * channels * height * width) ** 2
            layers_style_losses.append(layer_style_loss)
        return sum(layers_style_losses)

    def _content_loss(self, img_vgg_output: dict, ci_vgg_output: dict):
        """Content loss: half the MSE between the deepest hooked activations."""
        last_layer = max(img_vgg_output)
        content_loss = F.mse_loss(img_vgg_output[last_layer], ci_vgg_output[last_layer]) / 2
        return content_loss

    def loss_func(self, img_vgg_output, alpha, beta):
        """Total loss = alpha * content + beta * style against the cached targets."""
        return self._content_loss(img_vgg_output, self.ci_vgg_output) * alpha \
               + self._style_loss(img_vgg_output, self.si_vgg_output) * beta


def cal_style_representation(feature_plains: Tensor):
    """Return the Gram matrix of a single image's feature maps.

    :param feature_plains: a (C, H, W) activation tensor for ONE image.
    :return: a (C, C) matrix of inner products between flattened channels.
    :raises ValueError: if the input is not 3-dimensional.
    """
    # Explicit raise instead of assert: asserts vanish under `python -O`,
    # silently accepting batched input here.
    if feature_plains.dim() != 3:
        raise ValueError("this function only calculates one image's features")

    style_mat: Tensor = feature_plains.reshape((feature_plains.shape[0], -1))
    return torch.matmul(style_mat, style_mat.t())


if __name__ == '__main__':
    SHOW = False
    IMAGE_PATH = r'../origin_images'
    CONTENT_IMG = r'schoolpic1.jpg'
    STYLE_IMG = r'stars.jpg'
    RESULT_DIR_PATH = os.path.join('../results', CONTENT_IMG + '&' + STYLE_IMG)
    LAYERS = (1, 6, 11, 20, 29)

    # makedirs(exist_ok=True) also creates a missing '../results' parent,
    # where the original os.mkdir would raise FileNotFoundError.
    os.makedirs(RESULT_DIR_PATH, exist_ok=True)
    content_img = Image.open(os.path.join(IMAGE_PATH, CONTENT_IMG)).convert('RGB')
    style_img = Image.open(os.path.join(IMAGE_PATH, STYLE_IMG)).convert('RGB')
    content_img.save(os.path.join(RESULT_DIR_PATH, 'origin_content.jpg'))
    style_img.save(os.path.join(RESULT_DIR_PATH, 'origin_style.jpg'))
    origin_shape = content_img.size  # PIL size is (width, height)

    if SHOW:
        style_img.show()
        content_img.show()
    transform = T.Compose([
        T.Resize(224),
        T.CenterCrop(224),
        T.ToTensor(),
    ])
    # ImageNet statistics expected by the pretrained VGG.
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

    si = transform(style_img).to(device)
    ci = transform(content_img).to(device)

    si = normalizer(si)
    ci = normalizer(ci)

    # Add the batch dimension VGG expects: (1, 3, 224, 224).
    si = si.reshape(1, *si.shape)
    ci = ci.reshape(1, *ci.shape)

    # generate from content img
    org_img = transform(content_img).to(device)
    # generate from noise
    # org_img = torch.randn((3, 224, 224), requires_grad=True, device=device)
    org_img: Parameter = nn.Parameter(org_img)
    optim = torch.optim.Adam([org_img])

    # Move the whole module (including VGG and its buffers) onto the device.
    tf = transferer(LAYERS).to(device)
    tf.make_style_content(si, ci)

    # Converts a tensor back to a PIL image at the content image's original size.
    show = T.Compose([
        T.ToPILImage(),
        # PIL size is (W, H) but Resize wants (H, W), hence the swap.
        T.Resize((origin_shape[1], origin_shape[0])),
    ])

    # Optimization loop: 20 checkpoints of `times` Adam steps each.
    times = 500
    for ii in range(20):
        for i in tqdm(range(times), desc=f'{times} times generating {ii}'):
            optim.zero_grad()
            img = normalizer(org_img)
            img = img.reshape(1, *img.shape)
            img_vgg_output = tf(img)
            loss = tf.loss_func(img_vgg_output, alpha=0.001, beta=1)
            # NOTE(review): retain_graph=True is required as long as
            # make_style_content caches targets that still carry a grad graph
            # through the (unfrozen) VGG weights; if the targets are detached
            # inside the class, this flag can be dropped.
            loss.backward(retain_graph=True)
            optim.step()
            # Keep pixel values in a valid image range after each step.
            org_img.data.clamp_(0, 1)
        show(org_img.data).save(os.path.join(RESULT_DIR_PATH, f'{times}iter{ii}.png'))
        print(loss)
    print(loss)
