# encoding=utf-8
import argparse
import logging
import time

import torch
from PIL import Image
from torchvision import transforms
import numpy as np
import cv2 as cv
import os
import functools
from torch import nn
from networks import ResnetGenerator

# Configure the root logger once at import time; INFO level surfaces per-image progress.
logging.basicConfig(level=logging.INFO)


def tensor2im(input_image, imtype=np.uint8):
    """Convert a CHW model-output array in [-1, 1] to an HWC image array.

    Note: despite the name, every caller passes a numpy array (the tensor is
    moved to CPU and converted before this is called).

    Parameters:
        input_image (np.ndarray) -- CHW array with values in [-1, 1]
        imtype (type)            -- the desired dtype of the converted array

    Returns:
        np.ndarray -- HWC array scaled to [0, 255] and cast to *imtype*.
    """
    image_numpy = input_image
    if image_numpy.shape[0] == 1:  # grayscale: replicate the single channel to 3 (RGB)
        image_numpy = np.tile(image_numpy, (3, 1, 1))
    # CHW -> HWC, then map [-1, 1] -> [0, 255]
    image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
    return image_numpy.astype(imtype)


class Identity(nn.Module):
    """No-op module that returns its input unchanged.

    Used as the stand-in normalization layer when ``norm_type == 'none'``.
    """

    def forward(self, x):
        # Identity mapping: hand the input straight back.
        return x


def get_norm_layer(norm_type='instance'):
    """Return a normalization layer

    Parameters:
        norm_type (str) -- the name of the normalization layer: batch | instance | none

    For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
    For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
    """
    # 'none' returns a factory that ignores its argument and yields an Identity.
    if norm_type == 'none':
        def norm_layer(x):
            return Identity()
        return norm_layer

    # Table of (layer class, construction kwargs) per supported norm type.
    configs = {
        'batch': (nn.BatchNorm2d, {'affine': True, 'track_running_stats': True}),
        'instance': (nn.InstanceNorm2d, {'affine': False, 'track_running_stats': False}),
    }
    if norm_type not in configs:
        raise NotImplementedError(
            'normalization layer [%s] is not found' % norm_type)
    layer_cls, kwargs = configs[norm_type]
    return functools.partial(layer_cls, **kwargs)


# Model initialization: 1-channel in / 1-channel out ResNet generator (9 blocks,
# 64 base filters, instance norm). Weights are loaded from a checkpoint in the
# working directory at import time, then the model is moved to GPU and switched
# to eval mode. NOTE(review): requires CUDA and "75_net_G_A.pth" to be present
# next to the script — fails at import otherwise; confirm deployment layout.
model = ResnetGenerator(1, 1, 64, norm_layer=get_norm_layer(), use_dropout=False, n_blocks=9)
model.load_state_dict(torch.load("75_net_G_A.pth"))
model = model.cuda()
model.eval()

# Preprocessing transform: resize to the network's 518x518 input size, convert
# to a [0, 1] tensor, then normalize to [-1, 1] (mean=0.5, std=0.5).
transform = transforms.Compose([
    transforms.Resize([518, 518]),
    transforms.ToTensor(),
    transforms.Normalize(0.5, 0.5)
])


def enhance_octa_image(input_path: str, output_path: str):
    """Run the generator over every file in *input_path*, saving PNGs to *output_path*.

    Each image is converted to grayscale, resized to the model's input size,
    enhanced by the network, converted back to an 8-bit BGR image, resized to
    its original dimensions, and written as <name>.png. Per-image and total
    wall-clock times are logged.

    Parameters:
        input_path (str)  -- directory containing the images to enhance
        output_path (str) -- directory for enhanced PNGs (created if missing)
    """
    logging.info("Now Start enhance image...")
    os.makedirs(output_path, exist_ok=True)

    file_list = os.listdir(input_path)
    t1 = time.perf_counter()

    for each_file in file_list:
        t2 = time.perf_counter()
        name, _ = os.path.splitext(each_file)
        png_save_path = os.path.join(output_path, name + ".png")

        with torch.no_grad():
            # Fix: open via context manager so the PIL file handle is released
            # (the original leaked one handle per image).
            with Image.open(os.path.join(input_path, each_file)) as image:
                ori_size = image.size  # (width, height) — matches cv.resize's dsize order
                gray = image.convert("L")

            # Preprocess: 518x518, [-1, 1], add batch dim, move to GPU.
            tensor = transform(gray).unsqueeze(0).float().cuda()

            # Inference.
            output = model(tensor)

            # Postprocess: drop batch dim, back to numpy HWC uint8 RGB.
            rgb = tensor2im(output.squeeze(0).cpu().numpy())
            # tensor2im yields RGB; OpenCV writes BGR, so swap channel order.
            bgr = cv.cvtColor(rgb, cv.COLOR_RGB2BGR)
            resized = cv.resize(bgr, ori_size)

            cv.imwrite(png_save_path, resized)

        t3 = time.perf_counter()
        # Lazy %-style args: formatting only happens if the record is emitted.
        logging.info("Done %s, time: %s", each_file, t3 - t2)

    t4 = time.perf_counter()
    logging.info("All cost: %s", t4 - t1)


def run(base_path: str):
    """Enhance all images under <base_path>/user_input into <base_path>/enhance_output.

    Logs total elapsed time and average seconds per picture.

    Parameters:
        base_path (str) -- working directory containing the "user_input" folder
    """
    input_dir = os.path.join(base_path, "user_input")
    t1 = time.perf_counter()
    # user_input -> enhance_output
    enhance_octa_image(input_dir, os.path.join(base_path, "enhance_output"))
    t2 = time.perf_counter()

    # Fix: original divided by len(os.listdir(...)) unconditionally, raising
    # ZeroDivisionError when the input directory is empty.
    num_images = len(os.listdir(input_dir))
    if num_images:
        logging.info(
            "All cost {}, {} s/pic".format(t2 - t1, (t2 - t1) / num_images)
        )
    else:
        logging.info("All cost {}, no images found".format(t2 - t1))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Fix: argparse's default for a missing flag is None, so the original
    # `args.workdir == ""` check could never fire; default to "" and test truthiness.
    parser.add_argument("--workdir", type=str, default="")
    args = parser.parse_args()
    if not args.workdir:
        logging.error("work dir is empty")
        # Fix: original fell through and called run(None) after logging the error.
        raise SystemExit(1)

    run(args.workdir)
