'''
Inference for Composition-1k Dataset.

Run:
python inference.py \
    --config-dir path/to/config \
    --checkpoint-dir path/to/checkpoint \
    --inference-dir path/to/inference \
    --data-dir path/to/data
'''
import os
import torch
from PIL import Image
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms import functional as F
from os.path import join as opj
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import LazyConfig, instantiate
from detectron2.engine import default_argument_parser

import warnings

warnings.filterwarnings('ignore')


# Dataset and Dataloader
# Dataset and Dataloader
def collate_fn(batched_inputs):
    """Collate a list of per-sample dicts into a single batched dict.

    Tensor values are stacked along a new leading batch dimension;
    non-tensor values (e.g. the 'image_name' strings produced by the
    datasets in this file) are gathered into a list instead of being
    passed to torch.stack, which would raise a TypeError.

    Args:
        batched_inputs: non-empty list of dicts sharing the same keys.

    Returns:
        dict with one entry per key: a stacked tensor or a list.
    """
    rets = dict()
    for k in batched_inputs[0].keys():
        values = [sample[k] for sample in batched_inputs]
        if torch.is_tensor(values[0]):
            rets[k] = torch.stack(values)
        else:
            # Strings and other metadata cannot be stacked; keep as list.
            rets[k] = values
    return rets


class Composition_1k(Dataset):
    """Composition-1k test set.

    Expects `data_dir` to contain 'merged' (composited RGB images) and
    'trimaps' subdirectories with matching file names. Each sample is a
    dict with:
        'trimap': float tensor in [0, 1], shape (C, H, W)
        'image': float tensor in [0, 1], shape (C, H, W)
        'image_name': file name (str), used to name the saved prediction
    """

    def __init__(self, data_dir):
        self.data_dir = data_dir
        # The 'merged' listing defines sample order; sorted for determinism.
        self.file_names = sorted(os.listdir(opj(self.data_dir, 'merged')))

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        name = self.file_names[idx]
        # Ground-truth alphas ('alpha_copy') are not loaded: they are not
        # needed for inference, and opening them leaked file handles.
        tris = Image.open(opj(self.data_dir, 'trimaps', name))
        imgs = Image.open(opj(self.data_dir, 'merged', name))

        return {
            'trimap': F.to_tensor(tris),
            'image': F.to_tensor(imgs),
            'image_name': name,
        }


class Distinctions646(Dataset):
    """Distinctions-646 test set.

    Expects `data_dir` to contain 'Image' and 'Trimap' subdirectories
    with matching file names. Each sample is a dict with:
        'trimap': float tensor in [0, 1], shape (C, H, W)
        'image': float tensor in [0, 1], shape (C, H, W)
        'image_name': file name (str)
    """

    def __init__(self, data_dir):
        self.data_dir = data_dir
        # The 'Image' listing defines sample order; sorted for determinism.
        self.file_names = sorted(os.listdir(opj(self.data_dir, 'Image')))

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        name = self.file_names[idx]
        # Ground-truth alphas ('GT') are not loaded: they are not needed
        # for inference, and opening them leaked file handles.
        tris = Image.open(opj(self.data_dir, 'Trimap', name))
        imgs = Image.open(opj(self.data_dir, 'Image', name))

        return {
            'trimap': F.to_tensor(tris),
            'image': F.to_tensor(imgs),
            'image_name': name,
        }


class AIM500(Dataset):
    """AIM-500 test set.

    Expects `data_dir` to contain 'original' (JPEG inputs) and 'trimap'
    (PNG trimaps) subdirectories; trimap names match the originals with
    the extension swapped from .jpg to .png. Each sample is a dict with:
        'trimap': float tensor in [0, 1], shape (C, H, W)
        'image': float tensor in [0, 1], shape (C, H, W)
        'image_name': the .png file name (str), so predictions save as PNG
    """

    def __init__(self, data_dir):
        self.data_dir = data_dir
        # The 'original' listing defines sample order; sorted for determinism.
        self.file_names = sorted(os.listdir(opj(self.data_dir, 'original')))

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        # Trimaps (and masks) are stored as PNG alongside JPEG originals.
        fname_png = fname.replace('.jpg', '.png')

        # Ground-truth mattes ('mask') are not loaded: they are not needed
        # for inference, and opening them leaked file handles.
        tris = Image.open(opj(self.data_dir, 'trimap', fname_png))
        imgs = Image.open(opj(self.data_dir, 'original', fname))

        return {
            'trimap': F.to_tensor(tris),
            'image': F.to_tensor(imgs),
            'image_name': fname_png,
        }

    def __len__(self):
        return len(self.file_names)


class Transparent460(Dataset):
    """Transparent-460 test set.

    Expects `data_dir` to contain 'composited_images' (JPEG inputs) and
    'trimap_copy' (PNG trimaps) subdirectories; trimap names match the
    composites with the extension swapped from .jpg to .png. Each sample
    is a dict with:
        'trimap': float tensor in [0, 1], shape (C, H, W)
        'image': float tensor in [0, 1], shape (C, H, W)
        'image_name': the .png file name (str), so predictions save as PNG
    """

    def __init__(self, data_dir):
        self.data_dir = data_dir
        # The composites listing defines sample order; sorted for determinism.
        self.file_names = sorted(os.listdir(opj(self.data_dir, 'composited_images')))

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        # Trimaps (and alphas) are stored as PNG alongside JPEG composites.
        fname_png = fname.replace('.jpg', '.png')

        # Ground-truth alphas ('alpha_copy') are not loaded: they are not
        # needed for inference, and opening them leaked file handles.
        tris = Image.open(opj(self.data_dir, 'trimap_copy', fname_png))
        imgs = Image.open(opj(self.data_dir, 'composited_images', fname))

        return {
            'trimap': F.to_tensor(tris),
            'image': F.to_tensor(imgs),
            'image_name': fname_png,
        }


class SIMD(Dataset):
    """SIMD test set.

    Expects `data_dir` to contain 'merged' (composited images) and
    'trimap' subdirectories with matching file names. Each sample is a
    dict with:
        'trimap': float tensor in [0, 1], shape (C, H, W)
        'image': float tensor in [0, 1], shape (C, H, W)
        'image_name': file name (str)
    """

    def __init__(self, data_dir):
        self.data_dir = data_dir
        # The 'merged' listing defines sample order; sorted for determinism.
        self.file_names = sorted(os.listdir(opj(self.data_dir, 'merged')))

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        name = self.file_names[idx]
        # Ground-truth alphas ('alpha') are not loaded: they are not needed
        # for inference, and opening them leaked file handles.
        tris = Image.open(opj(self.data_dir, 'trimap', name))
        imgs = Image.open(opj(self.data_dir, 'merged', name))

        return {
            'trimap': F.to_tensor(tris),
            'image': F.to_tensor(imgs),
            'image_name': name,
        }


# model and output
def matting_inference(
        config_dir='',
        checkpoint_dir='',
        inference_dir='',
        data_dir='',
):
    """Run alpha-matting inference over a test set and save mattes as images.

    Args:
        config_dir: path to a detectron2 LazyConfig python file.
        checkpoint_dir: path to the checkpoint to load into the model.
        inference_dir: output directory (created if missing); one matte
            image is written per input, named by the sample's 'image_name'.
        data_dir: Composition-1k-style dataset root containing 'merged'
            and 'trimaps'. When empty/None, falls back to the
            Composition-1k test split under $DATASET_ROOT (raises
            KeyError if that environment variable is unset).
    """
    # Build the model from the lazy config and load checkpoint weights.
    cfg = LazyConfig.load(config_dir)
    model = instantiate(cfg.model)
    model.to(cfg.train.device)
    model.eval()
    DetectionCheckpointer(model).load(checkpoint_dir)

    # Resolve the dataset location: an explicit data_dir wins; otherwise
    # default to the Composition-1k test split under $DATASET_ROOT.
    # Only the dataset actually evaluated is instantiated, so the other
    # benchmark roots need not exist on disk.
    if not data_dir:
        data_dir = opj(os.environ['DATASET_ROOT'], 'adobe/full_dataset/test')

    test_dl = DataLoader(
        dataset=Composition_1k(data_dir=data_dir),
        shuffle=False,
        batch_size=1,  # full-resolution images: one per batch, no padding
        num_workers=6,
    )

    os.makedirs(inference_dir, exist_ok=True)

    # Half precision enables inference of ViTMatte-B on large (DIM 481) images.
    model.half()

    for data in tqdm(test_dl):
        with torch.inference_mode():
            # Move tensors to the model's device in fp16; leave the
            # 'image_name' strings untouched.
            for k in data.keys():
                if k != 'image_name':
                    data[k] = data[k].to(model.device).half()

            # 'phas' has leading batch/channel dims; flatten to (H, W)
            # so it converts to a single-channel PIL image.
            output = model(data)['phas'].flatten(0, 2)
            output = F.to_pil_image(output)
            output.save(opj(inference_dir, data['image_name'][0]))
            # torch.cuda.empty_cache()


if __name__ == '__main__':
    # CLI entry point. Defaults reproduce the ViTMatte-B / Composition-1k
    # half-precision evaluation; unlike hard-coded assignments after
    # parse_args(), argparse defaults can be overridden from the command line.
    parser = default_argument_parser()
    parser.add_argument('--config-dir', type=str,
                        default='configs/ViTMatte_B_100ep.py')
    parser.add_argument('--checkpoint-dir', type=str,
                        default='pretrained/ViTMatte_B_Com.pth')
    parser.add_argument('--inference-dir', type=str,
                        default='results/c1k_half/ViTMatte_B_Com')
    parser.add_argument('--data-dir', type=str, default=None,
                        help='dataset root; falls back to $DATASET_ROOT when omitted')

    args = parser.parse_args()

    matting_inference(
        config_dir=args.config_dir,
        checkpoint_dir=args.checkpoint_dir,
        inference_dir=args.inference_dir,
        data_dir=args.data_dir,
    )
