# -*- coding: utf-8 -*-

import torch
import numpy as np
from PIL import Image
from enhance.data.base_dataset import get_transform
from enhance.models import create_model
import cv2
from enhance.options.test_options import TestOptions

def interface(img_path, dst_path,
              checkpoints_dir="C:\\Users\\Gandalf\\Desktop\\newfloder\\flaskProject723\\enhance\\checkpoints",
              name="isee_csigan",
              epoch="120",
              gpu_ids=None):
    """Run single-image enhancement inference and write the result to disk.

    Loads the generator specified by ``name``/``epoch`` from
    ``checkpoints_dir``, runs it on the image at ``img_path``, and saves
    the enhanced image (as 8-bit BGR via OpenCV) to ``dst_path``.

    Args:
        img_path: Path to the input image (opened with PIL, converted to RGB).
        dst_path: Path where the enhanced image is written.
        checkpoints_dir: Directory containing the model checkpoints.
        name: Experiment/checkpoint name to load.
        epoch: Which epoch's weights to load.
        gpu_ids: List of GPU ids ([] for CPU). Defaults to [0].

    Raises:
        IOError: If cv2 fails to write the output image.
    """
    opt = TestOptions().parse()  # get test options
    # hard-code some parameters for test
    opt.num_threads = 0  # test code only supports num_threads = 0
    opt.batch_size = 1  # test code only supports batch_size = 1
    opt.serial_batches = True  # disable data shuffling; comment this line if results on randomly chosen images are needed.
    opt.no_flip = True  # no flip; comment this line if results on flipped images are needed.
    opt.display_id = -1  # no visdom display; the test code saves the results to a HTML file.

    opt.checkpoints_dir = checkpoints_dir
    opt.name = name
    opt.model = "test"
    opt.dataset_mode = "single"
    opt.phase = "test"
    opt.eval = True
    opt.no_dropout = True
    opt.input_nc = 3
    opt.output_nc = 3
    opt.model_suffix = "_A"
    opt.epoch = epoch
    opt.gpu_ids = [0] if gpu_ids is None else gpu_ids  # [] for CPU

    # Build the single-sample input dict the model expects.
    data = {}
    data['A_paths'] = img_path
    A_img = Image.open(img_path).convert('RGB')
    transform = get_transform(opt, grayscale=(opt.input_nc == 1))
    A = transform(A_img)
    data['A'] = A.unsqueeze(0)  # add batch dimension: (1, C, H, W)
    model = create_model(opt)  # create a model given opt.model and other options
    model.setup(opt)  # regular setup: load and print networks; create schedulers

    # test with eval mode. This only affects layers like batchnorm and dropout.
    # For [pix2pix]: we use batchnorm and dropout in the original pix2pix. You can experiment it with and without eval() mode.
    # For [CycleGAN]: It should not affect CycleGAN as CycleGAN uses instancenorm without dropout.
    if opt.eval:
        model.eval()

    model.set_input(data)  # unpack data from data loader
    model.test()  # run inference
    visuals = model.get_current_visuals()  # get image results

    # Map network output from [-1, 1] to [0, 255]. Clip before the uint8
    # cast: values outside [-1, 1] would otherwise wrap around and corrupt pixels.
    img_arr = (visuals['fake'].squeeze().cpu().numpy().transpose((1, 2, 0)) + 1.0) / 2.0 * 255.0
    img_arr = np.clip(img_arr, 0.0, 255.0).astype(np.uint8)

    # cv2 expects BGR channel order, hence the [:, :, ::-1] RGB->BGR flip.
    if not cv2.imwrite(dst_path, img_arr[:, :, ::-1]):
        raise IOError("Failed to write enhanced image to %s" % dst_path)
    return



