from __future__ import print_function
import argparse
import torch
import math
from torch.autograd import Variable
from PIL import Image
import matplotlib.pyplot as plt
from torchvision.transforms import ToTensor
import torchvision.transforms as transforms
import numpy as np

# Command-line interface for the demo. `opt` is read by test_Demo() below.
arg_parser = argparse.ArgumentParser(
    description='PyTorch Super Res Example')
arg_parser.add_argument('--input_image', type=str, required=True,
                        help='input image to use')
arg_parser.add_argument('--model', type=str, required=True,
                        help='model file to use')
arg_parser.add_argument('--output_filename', type=str,
                        help='where to save the output image')
arg_parser.add_argument('--scale_factor', type=float,
                        help='factor by which super resolution needed')
opt = arg_parser.parse_args()

def test_Demo():
    """Run single-image super-resolution using the CLI options in `opt`.

    Reads `opt.input_image`, upsamples its luminance (Y) channel by
    `opt.scale_factor` through the model loaded from `opt.model`, merges
    the result with bicubically-resized Cb/Cr channels, saves the RGB
    output to `opt.output_filename`, and displays input/output figures.
    """
    # Work in YCbCr: the network operates on the luminance channel only.
    img = Image.open(opt.input_image).convert('YCbCr')
    y, cb, cr = img.split()
    # NOTE(review): assumes opt.scale_factor was provided; it has no
    # default, so a missing flag would fail here with a TypeError.
    y = y.resize((int(y.size[0] * opt.scale_factor),
                  int(y.size[1] * opt.scale_factor)), Image.BICUBIC)

    # Full-model checkpoint (not a state_dict); force CPU for portability.
    model = torch.load(opt.model, map_location=torch.device('cpu'))
    model.eval()  # disable dropout/batch-norm training behavior

    # PIL size is (width, height); tensor layout is (N, C, H, W).
    input_tensor = ToTensor()(y).view(1, -1, y.size[1], y.size[0])
    with torch.no_grad():  # inference only; Variable() is deprecated
        out = model(input_tensor)
    # Bug fix: PIL Images have .size, not .shape — the original
    # `img.shape` raised AttributeError. Report the tensor shape instead.
    print('input shape is', input_tensor.shape)
    print("networks output shape is", out.shape)

    # Convert the network's Y-channel output back to an 8-bit PIL image.
    out_img_y = out[0].numpy()
    out_img_y *= 255
    out_img_y = out_img_y.clip(0, 255)
    out_img_y = Image.fromarray(np.uint8(out_img_y[0]), mode='L')

    # Upsample chroma with plain bicubic to match the new Y resolution.
    out_cb = cb.resize(out_img_y.size, Image.BICUBIC)
    out_cr = cr.resize(out_img_y.size, Image.BICUBIC)
    out_SR_image = Image.merge('YCbCr', [out_img_y, out_cb, out_cr]).convert('RGB')
    out_SR_image.save(opt.output_filename)

    plt.figure()
    plt.imshow(out_SR_image)
    plt.title("SR result img_{}".format(out_SR_image.size))

    plt.figure()
    plt.imshow(img)
    plt.title("Input LR image_{}".format(img.size))
    plt.show()

# Script entry point: run the demo only when executed directly, not on import.
if __name__ =="__main__":
    test_Demo()