import os.path as osp
import logging
import os
import time
import argparse
from collections import OrderedDict

import options.options as option
import utils.util as util
from data.util import bgr2ycbcr
from data import create_dataset, create_dataloader
from models import create_model
from alfred.utils.file_io import ImageSourceIter
import cv2
import torch
import numpy as np

# ---- command-line options & run-time setup --------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--data', type=str, default='',
                    help='Image file, image folder or video to upscale.')
# NOTE: fixed help-text typo ("YMAL" -> "YAML").
parser.add_argument('-opt', type=str, required=True,
                    help='Path to options YAML file.')
args = parser.parse_args()
# Parse the experiment options in test mode, then wrap them so missing keys
# read as None instead of raising KeyError.
opt = option.parse(args.opt, is_train=False)
opt = option.dict_to_nonedict(opt)

# Create every output directory listed in opt['path'], skipping the training
# root and the pretrained-model / resume entries (those point at files).
util.mkdirs(
    (path for key, path in opt['path'].items()
     if not key == 'experiments_root' and 'pretrain_model' not in key and 'resume' not in key))
util.setup_logger('base', opt['path']['log'], 'test_' + opt['name'], level=logging.INFO,
                  screen=True, tofile=True)
logger = logging.getLogger('base')
logger.info(option.dict2str(opt))

# Folder where the upscaled results are written.
dataset_dir = 'results'
os.makedirs(dataset_dir, exist_ok=True)


def demo():
    """Run super-resolution inference on every item yielded by ``args.data``.

    ``args.data`` may be an image file, an image folder, or a video;
    ``ImageSourceIter`` abstracts over all three and yields either a file
    path (str) or an ndarray frame. Each result is saved as a PNG under
    ``dataset_dir`` and shown in an OpenCV window.
    """
    model = create_model(opt)
    print('Model is ready.')
    # Renamed from `iter`, which shadowed the builtin.
    source = ImageSourceIter(args.data)
    frame_idx = 0
    while True:
        try:
            itm = next(source)
        except StopIteration:
            break  # source exhausted -- exit cleanly instead of crashing
        print(itm)
        if isinstance(itm, str):
            # Image file: derive the output name from the input file name.
            img_name = osp.splitext(osp.basename(itm))[0]
            itm = cv2.imread(itm)
        else:
            # Video frame: there is no file path, so number frames instead.
            # (The original read an undefined `img_p` here -> NameError.)
            img_name = 'frame_{:06d}'.format(frame_idx)
        frame_idx += 1
        print(itm.shape)
        # HWC uint8 BGR -> float32 RGB in [0, 1].
        inp = np.array(itm).astype(np.float32)[
            :, :, [2, 1, 0]] / 255.  # BGR2RGB
        # CHW tensor with a leading batch dimension, as the model expects.
        data = {'LQ': torch.tensor(inp).permute(2, 0, 1).unsqueeze(0)}
        model.feed_data(data, need_GT=False)
        if opt['model'] == 'sr':
            model.test_x8()      # self-ensemble (x8) inference
        elif opt['large'] is not None:
            model.test_chop()    # chopped/tiled inference for large inputs
        else:
            model.test()
        # `is True` alone suffices; the extra `is not None` was redundant.
        if opt['back_projection'] is True:
            model.back_projection()
        visuals = model.get_current_visuals(need_GT=False)

        sr_img = util.tensor2img(visuals['SR'])  # uint8

        # Save the result, honoring an optional file-name suffix.
        suffix = opt['suffix']
        save_img_path = osp.join(dataset_dir, img_name + (suffix or '') + '.png')
        util.save_img(sr_img, save_img_path)

        cv2.imshow('res', itm)
        cv2.imshow('sr', sr_img)
        # Video: advance automatically; still image: wait for a key press.
        if source.video_mode:
            cv2.waitKey(1)
        else:
            cv2.waitKey(0)


if __name__ == '__main__':
    # Script entry point: run the interactive super-resolution demo.
    demo()
