import argparse
import os

import albumentations as A
import torch
from albumentations.pytorch import ToTensorV2
from rainbowneko.train.data.handler import ImageHandler
from torchvision import transforms as T

from model import HourGlassNetMultiScaleFormer
from utils import tensor_divide, tensor_merge
from tqdm import tqdm

class Infer:
    """Tiled super-resolution inference: split an image into overlapping
    patches, super-resolve each patch with the generator, then stitch the
    results back together to hide seam artifacts."""

    def __init__(self, opt, device='cuda'):
        """Build the generator, load its weights and set up preprocessing.

        Args:
            opt: parsed CLI options; reads opt.sr_scale, opt.result_dir, opt.ckpt.
            device: torch device string to run inference on.
        """
        self.device = device
        self.sr_scale = opt.sr_scale

        # Make sure the result folder exists before anything is written.
        os.makedirs(opt.result_dir, exist_ok=True)

        # Init Net
        print('Build Generator Net...')
        generator = HourGlassNetMultiScaleFormer(upscale=opt.sr_scale, HG_stage=(2, 3, 4))
        # map_location='cpu' avoids a crash when the checkpoint was saved on a
        # device that is unavailable here (e.g. a CUDA checkpoint on a CPU box);
        # the model is moved to the target device right after.
        generator.load_state_dict(torch.load(opt.ckpt, map_location='cpu'))
        generator = generator.to(device)
        generator.eval()

        self.generator = generator
        # Normalize to [-1, 1] (mean/std = 0.5) and convert HWC image -> CHW tensor.
        self.handler = ImageHandler(transform=A.Compose([
            A.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
            ToTensorV2(),
        ]))
        self.to_pil = T.ToPILImage()

    def load_image(self, path):
        """Load one image from *path* and return a (1, C, H, W) tensor on self.device."""
        img = self.handler(path)
        return img.unsqueeze(0).to(self.device)

    def infer_one(self, path, psize, overlap):
        """Super-resolve a single image using tiled inference.

        Args:
            path: path to the low-resolution input image.
            psize: patch size (pixels) used when dividing the input.
            overlap: overlap (pixels) between adjacent patches.

        Returns:
            PIL image of the super-resolved result.
        """
        lr = self.load_image(path)

        with torch.no_grad():
            B, C, H, W = lr.shape
            blocks = torch.cat(tensor_divide(lr, psize, overlap), dim=0)
            results = []

            # Run the generator one patch at a time to bound GPU memory use.
            # (blocks[idx:idx+1] also covers the final index, so no special
            # last-iteration branch is needed.)
            for idx in tqdm(range(blocks.shape[0])):
                patch = blocks[idx:idx + 1].to(self.device)
                sr_var, SR_map = self.generator(patch)

                # Multi-stage models return one output per stage; keep the last.
                if isinstance(sr_var, (list, tuple)):
                    sr_var = sr_var[-1]

                results.append(sr_var.to('cpu'))

            results = torch.cat(results, dim=0)
            # Stitch the SR patches back into one image at the upscaled size.
            sr_img = tensor_merge(results, None, psize * self.sr_scale, overlap * self.sr_scale,
                                  tensor_shape=(B, C, H * self.sr_scale, W * self.sr_scale))

        # Undo the [-1, 1] normalization, clamp to [0, 1], then convert to PIL.
        sr_img = self.to_pil(torch.clamp((sr_img[0] + 1) / 2, min=0.0, max=1.0).cpu())
        return sr_img


# Usage:
# python demo.py --image imgs/loli1 --result_dir output/  --ckpt exps/CDC-Former-L-v1/ckpts/HG-Former-12000.ckpt
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    ## Test Dataset
    parser.add_argument('--image', type=str, required=True,
                        help='path to the low-resolution input image')

    # Test Options
    parser.add_argument('--overlap', type=int, default=64,
                        help='overlap in pixels between adjacent patches, to avoid edge artifacts')
    parser.add_argument('--psize', type=int, default=512,
                        help='patch size in pixels used when dividing the input image')

    # Model Options
    parser.add_argument('--sr_scale', type=int, default=2,
                        help='super-resolution upscale factor')

    parser.add_argument('--ckpt', type=str, default='./ckpt/HGSR-MHR-anime_X4_280.pth',
                        help='path to the generator checkpoint')
    parser.add_argument('--result_dir', type=str, default='result',
                        help='folder to save SR results')
    opt = parser.parse_args()

    infer = Infer(opt)
    img = infer.infer_one(opt.image, opt.psize, opt.overlap)

    # Save under the same file name as the input, inside result_dir.
    im_name = os.path.basename(opt.image)
    img.save(os.path.join(opt.result_dir, im_name), quality=100)
