import os
import random

import torch
from PIL import Image
from torch.utils.data import Dataset
import torchvision.transforms as T

from models.model import Backbone
from options.train_options import TrainOptions
from utils.util import toNormalPic
import torchvision.utils as vutils

class EvaluationDataset(Dataset):
    """Evaluation dataset pairing contour sketches with shuffled reference images.

    Expected layout under ``root``: ``contour/`` (input sketches) and
    ``ref/`` (reference color images).  When ``source_sketch_root`` is None,
    reference semantic maps are read from ``root/ref_semantic``; otherwise
    each reference's semantic map is looked up by file name inside
    ``source_sketch_root``.
    """

    def __init__(self, root, source_sketch_root=None):
        super(EvaluationDataset, self).__init__()
        self.root = root
        self.source_sketch_root = source_sketch_root
        self.contour_paths, self.image_paths, self.ref_semantic_paths = self.get_paths()
        self.contour_paths.sort()
        # Shuffle so each sketch is paired with a random reference image.
        random.shuffle(self.image_paths)
        if source_sketch_root is None:
            # NOTE(review): ref_semantic_paths is sorted while image_paths is
            # shuffled, so ref_semantic_paths[idx] may not correspond to
            # image_paths[idx] — confirm this pairing is intended.
            self.ref_semantic_paths.sort()

    def __getitem__(self, idx):
        """Return (sketch, ref, ref_semantic, sketch_path) tensors for index idx."""
        sketch = self.contour_paths[idx]
        ref = self.image_paths[idx]
        if self.source_sketch_root is not None:
            # Semantic map shares the reference image's file name.
            ref_semantic = self.findSemantic(self.source_sketch_root, ref)
        else:
            ref_semantic = self.ref_semantic_paths[idx]
        sketch = Image.open(sketch).convert('L')
        ref_semantic = Image.open(ref_semantic).convert('L')
        ref = Image.open(ref).convert('RGB')
        ref_semantic = self.translation(ref_semantic, True)
        sketch = self.translation(sketch, True)
        ref = self.translation(ref, False)
        return sketch, ref, ref_semantic, self.contour_paths[idx]

    def translation(self, img, isSketch=True):
        """Resize to 256, convert to tensor and normalize to [-1, 1].

        Sketches are single-channel, references are RGB, hence the two
        normalization signatures.
        """
        if isSketch:
            t = T.Compose([T.Resize(256), T.ToTensor(), T.Normalize(0.5, 0.5)])
        else:
            t = T.Compose([T.Resize(256), T.ToTensor(), T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
        return t(img)

    def __len__(self):
        # BUG FIX: the original returned len(min(a, b)), which compares the
        # two path LISTS lexicographically and returns one of them whole.
        # The intent is the smaller of the two lengths, so no index in
        # __getitem__ runs past either list.
        return min(len(self.contour_paths), len(self.image_paths))

    def get_paths(self):
        """Collect (contour_paths, image_paths, ref_semantic_paths) from disk.

        ref_semantic_paths is None when a source_sketch_root was supplied.
        """
        contour_dir = os.path.join(self.root, 'contour')
        img_dir = os.path.join(self.root, 'ref')
        ref_semantic_paths = None
        if self.source_sketch_root is None:
            ref_semantic_dir = os.path.join(self.root, 'ref_semantic')
            ref_semantic_paths = [os.path.join(ref_semantic_dir, f) for f in os.listdir(ref_semantic_dir)]
        contour_paths = [os.path.join(contour_dir, f) for f in os.listdir(contour_dir)]
        image_paths = [os.path.join(img_dir, f) for f in os.listdir(img_dir)]
        return contour_paths, image_paths, ref_semantic_paths

    def findSemantic(self, source_sketch_root, file):
        """Map an image path to its same-named file under source_sketch_root."""
        name = os.path.basename(file)
        return os.path.join(source_sketch_root, name)

class ModelUitls:
    """Runs Backbone inference over a dataset and saves the generated images.

    Output directory: ``<parent of opt.checkpoints_dir>/output/<opt.name>``;
    each generated image is saved under its source sketch's file name.
    """

    def __init__(self, opt, dataset):
        self.opt = opt
        self.model = Backbone(opt)
        self.dataset = dataset
        self.loader = self.getloader()
        # BUG FIX: the original appended opt.name a second time via raw string
        # concatenation ('output/<name><name>') and built save paths with '+'
        # and '/'; compute the directory once with os.path.join instead.
        self.save_root = os.path.join(os.path.dirname(opt.checkpoints_dir), 'output', opt.name)
        os.makedirs(self.save_root, exist_ok=True)

    def getloader(self):
        """Return a sequential, batch-size-1 DataLoader over the dataset."""
        loader = torch.utils.data.DataLoader(self.dataset, batch_size=1, shuffle=False, drop_last=False, num_workers=self.opt.num_workers)
        return loader

    def eval(self):
        """Generate one image per sample and write it to self.save_root."""
        self.model.eval()
        with torch.no_grad():  # inference only — skip autograd bookkeeping
            for data in self.loader:
                sketch = data[0].to(self.opt.device)
                ref = data[1].to(self.opt.device)
                ref_semantic = data[2].to(self.opt.device)
                # batch_size == 1, so the path batch holds exactly one entry.
                filename = os.path.basename(data[3][0])
                generate_out = self.model.inference(input_semantics=sketch, ref_semantics=ref_semantic, ref_image=ref)
                fake = generate_out['fake_image'].cpu()
                try:
                    # BUG FIX: the original read the module-global `opt` here
                    # instead of self.opt, breaking any non-script use of the
                    # class; the precomputed save_root removes that dependency.
                    vutils.save_image(fake, os.path.join(self.save_root, filename),
                                      nrow=1, padding=0, normalize=True)
                except OSError as err:
                    print(err)




if __name__ == '__main__':
    # Hard-coded dataset locations (Windows paths on the author's machine).
    eval_root = r'D:\zhw\data\TestDataset'
    val_sketch_dir = r'E:\valDataset_anime\val\sketch'
    # Unused alternative (training-split sketches), kept for reference.
    train_sketch_dir = r'C:\Users\zhw\Desktop\data\anime-sketch-colorization-pair-resize\data\train\sketch'

    eval_set = EvaluationDataset(eval_root, val_sketch_dir)

    # NOTE: `opt` must stay module-global — ModelUitls.eval() reads it when
    # saving images.
    opt = TrainOptions().parse()
    runner = ModelUitls(opt, eval_set)
    runner.eval()
