# -*- coding: utf-8 -*-
#!/usr/bin/env python
"""
-------------------------------------------------
   File Name：     train
   Description :   
   Author :       lth
   date：          2022/1/24
-------------------------------------------------
   Change Activity:
                   2022/1/24 12:27: create this script
-------------------------------------------------
this is the core of this project
"""
__author__ = 'lth'

import os
import random

import numpy as np
import torch
from PIL import Image
from colorama import Fore
from torch import nn
from torch import optim
from torch.backends import cudnn
from torch.utils.data import DataLoader
from torchvision import models
from tqdm import tqdm

from config import GetConfig
from datalist import SRDataset
from metric import Metric
from model import Generator, Discriminator
from utils import denormalize, weights_init

# Best (lowest) validation loss seen so far; start at +inf so the first
# completed validation epoch always checkpoints the models, regardless of
# the loss scale (the old magic number 99999999 could in principle be beaten
# by a diverged run's loss and silently skip the save).
global_loss = float("inf")


class SRGAN:
    """End-to-end SRGAN training harness.

    Builds the train/validation data pipeline, the generator/discriminator
    pair and a frozen VGG19 feature extractor (for the perceptual loss),
    then alternates adversarial training (:meth:`train`) with validation
    and best-model checkpointing (:meth:`test`).
    """

    def __init__(self):
        self.args = GetConfig()

        print(f"-----------{self.args.project_name}-------------")
        use_cuda = self.args.use_cuda and torch.cuda.is_available()
        self.device = torch.device("cuda" if use_cuda else "cpu")
        # num_workers is commonly set to ~4x the GPU count, but raising it
        # also increases host-side memory use; kept at 0 (load in the main
        # process) here.
        kwargs = {"num_workers": 0, "pin_memory": True} if use_cuda else {"num_workers": 0, "pin_memory": False}

        # Random train/validation split over all *.jpg files under base_dir.
        self.data = self.get_data(self.args.base_dir)
        random.shuffle(self.data)
        self.num_val = int(len(self.data) * self.args.val_per)
        self.num_train = len(self.data) - self.num_val

        self.train_dataloader = DataLoader(SRDataset(self.data[:self.num_train]),
                                           batch_size=self.args.train_batch_size,
                                           shuffle=True, drop_last=True,
                                           **kwargs)
        self.test_dataloader = DataLoader(SRDataset(self.data[self.num_train:]),
                                          batch_size=self.args.test_batch_size,
                                          shuffle=False, drop_last=True,
                                          **kwargs)

        # ------------------------------------------------------------------
        # Models
        # ------------------------------------------------------------------
        self.generator = Generator(scale_factor=self.args.factor).to(self.device)
        self.discriminator = Discriminator().to(self.device)
        # Frozen VGG19 features, used only as a fixed perceptual-loss backbone.
        # NOTE(review): ``pretrained=True`` is deprecated in recent torchvision
        # releases in favour of ``weights=`` — confirm the installed version
        # before upgrading.
        self.vgg = models.vgg19(pretrained=True).to(self.device).features
        for param in self.vgg.parameters():
            param.requires_grad = False
        self.vgg.eval()

        if use_cuda:
            device_ids = list(range(torch.cuda.device_count()))
            self.generator = torch.nn.DataParallel(self.generator, device_ids=device_ids)
            self.discriminator = torch.nn.DataParallel(self.discriminator, device_ids=device_ids)
            self.vgg = torch.nn.DataParallel(self.vgg, device_ids=device_ids)
            cudnn.benchmark = True
            cudnn.enabled = True

        if self.args.resume:
            # Partial-load pattern: update the freshly built state dict with
            # the checkpoint, so the checkpoint may be a subset of the keys.
            # (The original wrapped this in ``except Exception as e: raise e``,
            # which is a no-op; errors still propagate unchanged here.)
            print("load the weight from pretrained-weight file")
            generator_dict = self.generator.state_dict()
            checkpoint = torch.load(self.args.pretrained_generator_weight)['model_state_dict']
            generator_dict.update(checkpoint)
            self.generator.load_state_dict(generator_dict, strict=True)

            discriminator_dict = self.discriminator.state_dict()
            checkpoint = torch.load(self.args.pretrained_discriminator_weight)['model_state_dict']
            discriminator_dict.update(checkpoint)
            self.discriminator.load_state_dict(discriminator_dict, strict=True)

            print("Restoring the weight from pretrained-weight file \nFinished loading the weight")
        else:
            weights_init(self.generator, init_type="orthogonal", init_gain=1)
            weights_init(self.discriminator, init_type="orthogonal", init_gain=1)

        # ------------------------------------------------------------------
        # Losses, optimisers, learning-rate schedulers
        # ------------------------------------------------------------------
        self.BCE_LOSS = nn.BCELoss()
        self.MSE_LOSS = nn.MSELoss()
        self.L1_LOSS = nn.L1Loss()

        self.g_optim = optim.Adam(self.generator.parameters(), lr=self.args.lr, betas=(0.9, 0.999))
        self.d_optim = optim.Adam(self.discriminator.parameters(), lr=self.args.lr, betas=(0.9, 0.999))

        # Decay both learning rates by 2% after every epoch.
        self.g_scheduler = optim.lr_scheduler.StepLR(self.g_optim, step_size=1, gamma=0.98)
        self.d_scheduler = optim.lr_scheduler.StepLR(self.d_optim, step_size=1, gamma=0.98)

        # NOTE(review): this accumulator is shared across epochs and never
        # reset, so the PSNR/SSIM shown in the progress bar are running values
        # over the whole training history — confirm this is intended.
        self.metric = Metric()

    def work(self):
        """Train for the configured number of epochs, validating after each."""
        for epoch in range(1, self.args.epochs + 1):
            self.train(epoch)
            self.test(epoch)
        torch.cuda.empty_cache()

    def train(self, epoch):
        """Run one adversarial training epoch.

        For each batch the discriminator is updated on real HR images and
        detached generated images, then the generator is updated with a
        combined pixel (L1) + adversarial (BCE) + perceptual (VGG) loss.
        """
        self.generator.train()
        self.discriminator.train()

        g_loss = []
        d_loss = []

        pbar = tqdm(self.train_dataloader, desc=f'Train Epoch{epoch}/{self.args.epochs}',
                    bar_format='{l_bar}%s{bar}%s{r_bar}' % (Fore.BLUE, Fore.RESET))
        for low_r, high_r in pbar:
            low_r, high_r = low_r.to(self.device), high_r.to(self.device)
            # BCE targets: 1 for real HR images, 0 for generated ones
            # (batch size is constant because drop_last=True).
            real, fake = torch.ones(self.args.train_batch_size, device=self.device), torch.zeros(
                self.args.train_batch_size, device=self.device)

            # --------------------------------------------------------------
            # Discriminator step
            # --------------------------------------------------------------
            self.d_optim.zero_grad()

            d_real = self.discriminator(high_r)
            d_real_loss = self.BCE_LOSS(d_real, real)

            # .detach() keeps the discriminator loss from back-propagating
            # into the generator (the original differentiated the generator
            # graph here for no benefit).
            d_fake = self.discriminator(self.generator(low_r).detach())
            d_fake_loss = self.BCE_LOSS(d_fake, fake)

            discriminator_loss = d_fake_loss + d_real_loss
            discriminator_loss.backward()
            self.d_optim.step()
            d_loss.append(discriminator_loss.item())

            # --------------------------------------------------------------
            # Generator step
            # --------------------------------------------------------------
            self.g_optim.zero_grad()

            generator_image = self.generator(low_r)
            g_fake = self.discriminator(generator_image)
            g_fake_loss = self.BCE_LOSS(g_fake, real)
            image_loss = self.L1_LOSS(generator_image, high_r)
            perception_loss = self.L1_LOSS(self.vgg(generator_image), self.vgg(high_r))

            # SRGAN-style weighting: the pixel term dominates; adversarial
            # and perceptual terms act as small regularisers.
            generator_loss = image_loss + 1e-3 * g_fake_loss + 2e-6 * perception_loss

            generator_loss.backward()
            self.g_optim.step()
            g_loss.append(generator_loss.item())

            self.metric.update(generator_image, high_r)

            pbar.set_description(f"Train epoch:{epoch} "
                                 f"\tg_loss:{np.round(np.mean(g_loss), 6)} "
                                 f"\td_loss:{np.round(np.mean(d_loss), 6)} "
                                 f"\td_lr:{self.d_optim.param_groups[0]['lr']} "
                                 f"\tg_lr:{self.g_optim.param_groups[0]['lr']} "
                                 f"\t psnr:{self.metric.PSNR().item()}"
                                 f"\t ssim:{self.metric.SSIM()[0].item()}"
                                 )

            self.get_image(generator_image, high_r, low_r)
        self.d_scheduler.step()
        self.g_scheduler.step()

    @torch.no_grad()
    def test(self, epoch):
        """Validate for one epoch; checkpoint both models on a new best loss."""
        self.generator.eval()
        self.discriminator.eval()
        total_loss = []

        # Fixed: the original labelled the validation bar "Train Epoch".
        pbar = tqdm(self.test_dataloader, desc=f'Test Epoch{epoch}/{self.args.epochs}',
                    bar_format='{l_bar}%s{bar}%s{r_bar}' % (Fore.GREEN, Fore.RESET))
        for low_r, high_r in pbar:
            low_r, high_r = low_r.to(self.device), high_r.to(self.device)
            real = torch.ones(self.args.test_batch_size, device=self.device)

            generator_image = self.generator(low_r)
            g_fake = self.discriminator(generator_image)
            g_fake_loss = self.BCE_LOSS(g_fake, real)
            image_loss = self.MSE_LOSS(generator_image, high_r)
            perception_loss = self.MSE_LOSS(self.vgg(generator_image), self.vgg(high_r))

            generator_loss = image_loss + 1e-3 * g_fake_loss + 2e-6 * perception_loss

            # Append before reporting: the original averaged an empty list on
            # the first batch and printed NaN.
            total_loss.append(generator_loss.item())
            pbar.set_description(f"Test epoch:{epoch} "
                                 f"\tg_loss:{np.round(np.mean(total_loss), 6)} "
                                 )

        if not total_loss:
            # Validation loader yielded nothing (e.g. fewer samples than
            # test_batch_size with drop_last=True): nothing to score or save.
            return

        global global_loss
        if np.mean(total_loss) < global_loss:
            global_loss = np.mean(total_loss)
            torch.save({'model_state_dict': self.generator.state_dict()},
                       self.args.pretrained_generator_weight)
            torch.save({'model_state_dict': self.discriminator.state_dict()},
                       self.args.pretrained_discriminator_weight)

            print("save models")

        # Renders the last validation batch as a comparison grid.
        self.get_image(generator_image, high_r, low_r)

    @staticmethod
    def get_data(base_dir):
        """Recursively collect the paths of all ``*.jpg`` files under *base_dir*."""
        data = []
        for root, _dirs, files in os.walk(base_dir):
            for f in files:
                if f.endswith(".jpg"):
                    data.append(os.path.join(root, f))
        return data

    @staticmethod
    def get_image(generator, hr, lr):
        """Save a comparison grid to ``result/result.jpg``.

        Lays out up to four rows of [generated | high-res | upscaled low-res]
        images; the tensors are assumed to be NCHW and normalised the way
        :func:`denormalize` expects (TODO confirm against SRDataset).
        """
        size = min(generator.shape[0], 4)
        if size == 0:
            return  # empty batch — the original raised NameError here

        generator = (denormalize(generator.permute((0, 2, 3, 1)).detach().to("cpu").numpy()) * 255).astype('uint8')
        hr = (denormalize(hr.permute((0, 2, 3, 1)).detach().to("cpu").numpy()) * 255).astype('uint8')
        lr = (denormalize(lr.permute((0, 2, 3, 1)).detach().to("cpu").numpy()) * 255).astype('uint8')

        generators = []
        hrs = []
        lrs = []
        for i in range(size):
            gen = Image.fromarray(generator[i]).convert("RGB")
            high = Image.fromarray(hr[i]).convert("RGB")
            low = Image.fromarray(lr[i]).convert("RGB")
            # Upscale the LR input to HR size so the three columns align.
            low = low.resize((high.width, high.height))
            generators.append(gen)
            hrs.append(high)
            lrs.append(low)
        width, height = generators[0].width, generators[0].height
        target = Image.new("RGB", (width * 3, size * height), (255, 255, 255))

        for i in range(size):
            target.paste(generators[i], (0, height * i))
            target.paste(hrs[i], (width, height * i))
            target.paste(lrs[i], (2 * width, height * i))
        # The original crashed with FileNotFoundError when ./result was absent.
        os.makedirs("result", exist_ok=True)
        target.save("result/result.jpg")


if __name__ == "__main__":
    model = SRGAN()
    model.work()
