# -*- coding: utf-8 -*-
# @Time    : 2021/7/7 14:46
# @Author  : LuoTianHang


# #################### train.py description ##########################
# This script is the core training entry point for the style-transfer model.

import copy

import torch
import torch.nn.functional as F
from PIL import Image
from torch import nn
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.models import vgg16
from tqdm import tqdm

from config.config import parser
from datalist import StyleDataset
from model import UnetGenerator

# VGG layer-name lists from the reference Gatys-style implementation.
# NOTE(review): neither list is referenced in the visible code.
content_layers_default = ['conv_4']
style_layers_default = ['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5']
# ImageNet channel statistics, pre-moved to the GPU (importing this module
# therefore requires CUDA). Not referenced in the visible code.
cnn_normalization_mean = torch.tensor([0.485, 0.456, 0.406]).cuda()
cnn_normalization_std = torch.tensor([0.229, 0.224, 0.225]).cuda()

# Per-VGG-layer weights for the style loss; currently uniform. Only referenced
# from commented-out code below; the earlier weighted configuration is kept
# for reference.
# feature_layers_w = {0: 0.1, 2: 0.1, 5: 0.4, 7: 0.3, 10: 0.1}
feature_layers_w = {0: 1, 2: 1, 5: 1, 7: 1, 10: 1}
from config.config import BATCHSIZE

# Intended inference image size — not used by the visible training path.
imsize = 512

# Inference-time preprocessing: tensor conversion + ImageNet normalisation,
# deliberately without a resize.
infer_transform = transforms.Compose([
    # transforms.Resize((256, 256)),# inference  no need
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
])


class ContentLoss(nn.Module):
    """Content loss: mean absolute (L1) distance between two feature maps."""

    def __init__(self):
        super().__init__()

    def forward(self, input, target):
        # Mean-reduced L1 distance; also cached on the module so the most
        # recent value can be inspected after the forward pass.
        self.loss = (input - target).abs().mean()
        return self.loss


def gram_matrix(input):
    """Return the element-count-normalised Gram matrix of a feature map.

    `input` is a 4-D tensor (batch, channels, height, width); the result is
    a (batch*channels) x (batch*channels) matrix of channel inner products.
    """
    batch, channels, height, width = input.size()
    # One row per (batch, channel) pair, flattened over the spatial dims.
    flat = input.view(batch * channels, height * width)
    # Pairwise inner products, normalised by the total number of elements.
    gram = flat @ flat.t()
    return gram / (batch * channels * height * width)


class StyleLoss(nn.Module):
    """Style loss: MSE between the Gram matrices of two feature maps."""

    def __init__(self):
        super().__init__()

    def forward(self, input, target):
        # Compare second-order feature statistics rather than raw activations.
        gram_generated = gram_matrix(input)
        gram_style = gram_matrix(target)
        self.loss = F.mse_loss(gram_generated, gram_style, reduction="mean")
        return self.loss


# Training/loading preprocessing: resize to the 256x256 network input size,
# convert to tensor and apply ImageNet normalisation (same stats as
# `infer_transform`).
transform = transforms.Compose([
    transforms.Resize([256, 256]),
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
]
)


def denormalize(im, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
    """Undo per-channel ImageNet normalisation: im * std + mean.

    Relies on `im` broadcasting against the 3-tuples over its last axis —
    callers pass channel-last (H, W, 3) numpy arrays.
    """
    rescaled = im * std
    return rescaled + mean


def image_loader(image_name):
    """Load an image file as a normalised, batched (1, 3, 256, 256) CUDA tensor."""
    img = Image.open(image_name).convert('RGB')
    batch = transform(img).unsqueeze(0)
    return batch.cuda()


class Train(object):
    """End-to-end style-transfer trainer.

    Builds the U-Net generator, a frozen pretrained VGG16 feature extractor
    and the two perceptual losses, then — NOTE(review): as a side effect of
    ``__init__`` — immediately runs 200 training epochs, checkpointing the
    generator after every epoch.
    """

    def __init__(self):
        self.args = parser.parse_args()
        print(f"-----------{self.args.project_name}-------------")

        use_cuda = self.args.use_cuda and torch.cuda.is_available()
        if use_cuda:
            torch.cuda.manual_seed(self.args.seed)
            torch.cuda.manual_seed_all(self.args.seed)
        else:
            # NOTE(review): the CPU RNG is only seeded on the non-CUDA path,
            # so CUDA runs are not fully reproducible.
            torch.manual_seed(self.args.seed)

        self.device = torch.device('cuda' if use_cuda else 'cpu')

        train_kwargs = {'num_workers': 0, 'pin_memory': True} if use_cuda else {}

        self.train_dataset = StyleDataset()
        # NOTE(review): drop_last is not set, so the final batch may be
        # smaller than BATCHSIZE — see the repeat() in train() below.
        self.train_dataloader = DataLoader(self.train_dataset, batch_size=BATCHSIZE, shuffle=True,
                                           **train_kwargs)

        # Fixed style reference, loaded once as a batched CUDA tensor.
        self.style_img = image_loader("data/timg.jpg")

        # NOTE(review): .cuda() is hard-coded here (and throughout) even
        # though self.device may be 'cpu'.
        self.generator = UnetGenerator().cuda()

        # state_dict = torch.load("weights/best.pth")['model_state_dict']
        # self.generator.load_state_dict(state_dict)

        # Copy the VGG16 feature layers into a plain list, substituting every
        # in-place ReLU with an out-of-place one (the rebound `module` is what
        # gets appended) so activations needed by the losses are not clobbered.
        self.dis = []
        self.dis2 = vgg16(pretrained=True).features.eval().cuda()
        for module in self.dis2:
            if isinstance(module, nn.ReLU):
                module = nn.ReLU(inplace=False)
            self.dis.append(module)

        self.StyleLoss = StyleLoss()  # MSELOSS
        self.ContentLoss = ContentLoss()  # MAELOSS
        self.optimizer = torch.optim.Adam(self.generator.parameters(), lr=0.001)

        # Train for 200 epochs, overwriting the same checkpoint each time.
        for i in range(200):
            self.train(i)

            torch.save({
                "model_state_dict": self.generator.state_dict()
            }, "weights/temp/best.pth")
            print("model_saved")

    def train(self, epoch):
        """Run one epoch: perceptual content + style loss over VGG16 features.

        For each batch, the generator output, the original content batch and
        the (repeated) style image are pushed layer-by-layer through the VGG
        feature list; L1 content loss is taken at layer 5 and Gram-matrix MSE
        style loss at layers 0, 2, 5 and 7.
        """
        self.generator.train()
        pbar = tqdm(self.train_dataloader)
        for data in pbar:

            style_score = 0
            content_score = 0

            data = data.cuda()
            # data=data.clamp(0,1)
            # Keep an untouched copy of the input as the content target.
            content_data = copy.deepcopy(data)

            result = self.generator(data)

            # Fresh copy each iteration so the cached style image is never
            # mutated by the VGG forward passes below.
            target = copy.deepcopy(self.style_img)

            # NOTE(review): assumes every batch holds exactly BATCHSIZE
            # samples; a smaller final batch (drop_last is unset above) would
            # make result/target batch sizes disagree.
            target = target.repeat([BATCHSIZE, 1, 1, 1])
            # region — debug visualisation: dump generated / content / style
            # side by side to 2.jpg on every iteration.
            output1 = (denormalize(result[0].permute((1, 2, 0)).
                                   detach().
                                   to('cpu').
                                   numpy()) * 255).astype('uint8')
            output1 = Image.fromarray(output1).convert('RGB')
            output2 = (denormalize(content_data[0].permute((1, 2, 0)).
                                   detach().
                                   to('cpu').
                                   numpy()) * 255).astype('uint8')
            output2 = Image.fromarray(output2).convert('RGB')

            output3 = (denormalize(target[0].permute((1, 2, 0)).
                                   detach().
                                   to('cpu').
                                   numpy()) * 255).astype('uint8')
            output3 = Image.fromarray(output3).convert('RGB')

            output = Image.new("RGB", (256 * 3, 256))
            output.paste(output1, (0, 0))
            output.paste(output2, (256, 0))
            output.paste(output3, (512, 0))

            output.save("2.jpg")
            # endregion

            # combine the result ,content, style in on Batch ,and convey them into the vgg model
            # through this way can accelerate the computing speed
            # x = torch.cat([result, content_data, target], dim=0)
            for index, module in enumerate(self.dis):
                content_data = module(content_data)
                result = module(result)
                target = module(target)
                # Loss taps: content at layer 5, style at layers 0, 2, 5, 7.
                if index < 8:
                    if index in [5]:
                        # content_score += self.ContentLoss(x[0:BATCHSIZE, ...], x[BATCHSIZE:2 * BATCHSIZE, ...])
                        content_score += self.ContentLoss(result, content_data)
                    if index in [0, 2, 5, 7]:
                        # style_score += feature_layers_w[index] * self.StyleLoss(x[0:BATCHSIZE, ...], x[-1, ...])
                        # style_score += 1 * self.StyleLoss(x[0:BATCHSIZE, ...], x[-1, ...])
                        style_score += self.StyleLoss(result, target)
                else:
                    # No losses are taken past layer 7; stop the forward early.
                    break

            # Fixed weighting: style is scaled 10000x relative to content.
            loss = 1 * content_score + 10000 * style_score
            self.optimizer.zero_grad()  # do not forget this step
            loss.backward()
            self.optimizer.step()
            pbar.set_description(
                f'Train Epoch:{epoch}'
                f'\tloss:{round(loss.item(), 10)}'
                f'\tcontent_score:{round(1 * content_score.item(), 10)}'
                f'\tstyle_score:{round(10000 * style_score.item(), 10)}'
            )

    # Unused: one-off visual check that runs the generator on a single image
    # and writes the result to 2.jpg.
    # NOTE(review): normalises with mean/std 0.5 but denormalize() uses the
    # ImageNet stats — the round trip is inconsistent. Hard-coded local path.
    @torch.no_grad()
    def test(self):
        self.generator.eval()

        data_transform = transforms.Compose([
            transforms.Resize((512, 512)),
            transforms.ToTensor(),
            transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
        ])

        data = Image.open("G:/datasets/VOC/VOC2007/JPEGImages/2007_000256.jpg")

        data = data_transform(data).unsqueeze(0)

        result = self.generator(data.cuda())
        output = (denormalize(result.permute((0, 2, 3, 1)).
                              detach().
                              to('cpu').
                              numpy()) * 255).astype('uint8')
        output = Image.fromarray(output[0]).convert('RGB')
        output.save("2.jpg")


# Guard the entry point so importing this module (e.g. for its losses or
# transforms) does not immediately launch a 200-epoch training run.
if __name__ == "__main__":
    train = Train()