# -*- coding: utf-8 -*-
# !/usr/bin/env python
"""
-------------------------------------------------
   File Name：     inference
   Description :   
   Author :       lth
   date：          2022/1/24
-------------------------------------------------
   Change Activity:
                   2022/1/24 12:27: create this script
-------------------------------------------------
This script is used for testing: its main purpose is to make it convenient
for developers to try the trained model, so that anyone can use it easily.
"""
__author__ = 'lth'

import torch
from PIL import Image
from torch.backends import cudnn

from config import GetConfig
from datalist import test_transform
from model import Generator
from utils import denormalize


class Inference:
    """Single-image inference wrapper around the trained super-resolution Generator.

    Loads the pretrained generator weights (GPU if available and enabled in the
    config, otherwise CPU) and exposes :meth:`predict` to super-resolve one image.
    """

    def __init__(self):
        self.args = GetConfig()
        print(f"-----------{self.args.project_name}-------------")
        use_cuda = self.args.use_cuda and torch.cuda.is_available()
        self.device = torch.device("cuda" if use_cuda else "cpu")

        self.generator = Generator(scale_factor=self.args.factor).to(self.device)
        self.generator.eval()
        if use_cuda:
            self.generator = torch.nn.DataParallel(self.generator, device_ids=range(torch.cuda.device_count()))
            cudnn.benchmark = True
            cudnn.enabled = True
        # NOTE(review): assumes the checkpoint was saved from the same wrapping
        # (plain vs. DataParallel) as self.generator, otherwise the 'module.'
        # key prefixes will not line up under strict=True — confirm with training code.
        model_dict = self.generator.state_dict()
        # map_location is the bug fix: without it, a checkpoint saved on GPU
        # fails to load on a CPU-only machine despite the CPU fallback above.
        checkpoint = torch.load(self.args.pretrained_generator_weight,
                                map_location=self.device)['model_state_dict']
        model_dict.update(checkpoint)
        self.generator.load_state_dict(model_dict, strict=True)

    @torch.no_grad()
    def predict(self, image):
        """Super-resolve ``image`` and display generated|original side by side.

        :param image: a path (or file object) accepted by ``PIL.Image.open``.
        :return: the composite PIL RGB image (generated output on the left,
                 the original resized to match on the right), so callers can
                 save it instead of relying only on the interactive window.
        """
        image = Image.open(image).convert("RGB")
        image_clone = image

        image = test_transform(image)
        # Bug fix: move the input onto the model's device; previously the
        # tensor stayed on CPU while the generator could live on GPU.
        image = image.unsqueeze(0).to(self.device)

        generator_image = self.generator(image)
        generator_image = self.get_image(generator_image)

        # Build the side-by-side comparison canvas.
        target = Image.new("RGB", (2 * generator_image.width, generator_image.height))
        target.paste(generator_image, (0, 0))
        image = image_clone.resize((generator_image.width, generator_image.height))
        target.paste(image, (generator_image.width, 0))

        target.show()
        torch.cuda.empty_cache()  # no-op when CUDA was never initialized
        return target

    @staticmethod
    def get_image(generator):
        """Convert a (1, C, H, W) generator output tensor to a PIL RGB image.

        :param generator: the raw network output tensor (normalized values).
        :return: the denormalized image as ``PIL.Image`` in RGB mode.
        """
        # NCHW -> NHWC, denormalize to [0, 1], scale to 8-bit.
        array = (denormalize(generator.permute((0, 2, 3, 1)).detach().to("cpu").numpy()) * 255).astype('uint8')
        # The original pasted this onto a same-sized white canvas, which is a
        # redundant full-coverage copy; returning the image directly is identical.
        return Image.fromarray(array[0]).convert("RGB")


if __name__ == "__main__":
    # Manual smoke test: super-resolve the bundled sample image.
    Inference().predict("1.jpg")
