import os
import cv2
import argparse
import torch.utils.data as data
import tqdm

from PIL import Image
from dataloder import get_loader
from torch.backends import cudnn
from config import config, dataset_config, merge_cfg_arg
from solver_makeup import Solver_makeupGAN
from preprocessing import PreProcess
from torch.utils.data import DataLoader
from torchvision.utils import save_image
# Pin the process to GPU 3 (must be set before any CUDA context is created).
os.environ['CUDA_VISIBLE_DEVICES'] = '3'

# Output directories for the two save orders of each transferred result.
makeup_first_dir = 'beauty_new/makeup_first'
nonmakeup_first_dir = 'beauty_new/nonmakeup_first'
for _result_dir in (makeup_first_dir, nonmakeup_first_dir):
    os.makedirs(_result_dir, exist_ok=True)


class TestFixed_Dataset(data.Dataset):
    """Fixed test-pair dataset.

    Each line of the pair file under ``root`` names one non-makeup (source)
    image and one makeup (reference) image, separated by a single space:
    ``<non_makeup_name> <makeup_name>``.
    """

    def __init__(self, root=None, dim=(256, 256), img_size=256, test_name=None):
        """
        Args:
            root: dataset root; defaults to ``$DATAROOT/MT-Dataset``.
            dim: (width, height) to resize segmentation maps to, or a falsy
                value to keep them at their stored size.
            img_size: nominal image size (kept for compatibility; not used
                directly in this class).
            test_name: pair-list filename relative to ``root``.
        """
        super(TestFixed_Dataset, self).__init__()
        if root:
            self.root = root
        else:
            self.root = os.path.join(os.environ['DATAROOT'], 'MT-Dataset')
        if test_name is None:
            test_name = 'test_0412.txt'

        # Read the pair file once (the original read it twice, once per column).
        self.non_makeup_names, self.makeup_names = self._parse_pairs(
            os.path.join(self.root, test_name))
        self.preprocessor = PreProcess()
        self.img_size = img_size
        self.dim = dim

    @staticmethod
    def _parse_pairs(path):
        """Parse a pair file in one pass.

        Returns:
            (non_makeup_names, makeup_names): column 0 and column 1 of every
            non-blank line, in file order.
        """
        non_makeup_names, makeup_names = [], []
        with open(path, 'r') as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue  # tolerate blank/trailing lines
                parts = line.split(' ')
                non_makeup_names.append(parts[0])
                makeup_names.append(parts[1])
        return non_makeup_names, makeup_names

    def load_from_file(self, img_name):
        """Load image, face mask, and landmarks for ``img_name`` and run the
        preprocessor.

        Returns whatever ``PreProcess.process`` produces — indexed as a
        sequence downstream (element 0 is the image tensor in [-1, 1]);
        exact contents depend on the project's PreProcess implementation.
        """
        image = Image.open(os.path.join(self.root, 'images', img_name)).convert('RGB')
        mask = self.preprocessor.load_mask(os.path.join(self.root, 'segs', img_name))
        base_name = os.path.splitext(img_name)[0]
        lms = self.preprocessor.load_lms(os.path.join(self.root, 'lms', f'{base_name}.npy'))
        return self.preprocessor.process(image, mask, lms)

    def __len__(self):
        # One sample per pair-file line.
        return len(self.makeup_names)

    def __getitem__(self, index):
        """Return one source/reference pair plus their segmentation maps.

        Raises:
            FileNotFoundError: if an scgan segmentation file is missing or
                unreadable.
        """
        name_s = self.non_makeup_names[index]
        name_r = self.makeup_names[index]
        # '<src>&<ref>' identifier, later used to name the saved results.
        basename_s = os.path.basename(name_s).split('.')[0]
        basename_r = os.path.basename(name_r).split('.')[0]
        basename = '%s&%s' % (basename_s, basename_r)

        source = self.load_from_file(name_s)
        reference = self.load_from_file(name_r)
        seg_path_s = os.path.join(self.root, 'scgan_segs', name_s)
        seg_path_r = os.path.join(self.root, 'scgan_segs', name_r)
        img_nonmakeup_seg = cv2.imread(seg_path_s, 0)  # grayscale label map
        img_makeup_seg = cv2.imread(seg_path_r, 0)
        # cv2.imread returns None on a missing/unreadable file; fail loudly
        # here instead of letting cv2.resize raise a cryptic TypeError.
        if img_nonmakeup_seg is None:
            raise FileNotFoundError('cannot read segmentation: %s' % seg_path_s)
        if img_makeup_seg is None:
            raise FileNotFoundError('cannot read segmentation: %s' % seg_path_r)
        if self.dim:
            img_nonmakeup_seg = cv2.resize(img_nonmakeup_seg, self.dim, interpolation=cv2.INTER_AREA)
            img_makeup_seg = cv2.resize(img_makeup_seg, self.dim, interpolation=cv2.INTER_AREA)

        return {
            'source': source,
            'nonmakeup_img': source[0],           # [-1, 1]
            'nonmakeup_seg': img_nonmakeup_seg,   # label ids, [0, 14]
            'src_img': (source[0] + 1) / 2,       # [0, 1]
            'reference': reference,
            'makeup_img': reference[0],
            'makeup_seg': img_makeup_seg,
            'ref_img': (reference[0] + 1) / 2,
            'txt': 'makeup transfer',
            'img_name': basename,
        }


def _str2bool(value):
    """Parse a command-line boolean.

    argparse's ``type=bool`` treats ANY non-empty string as True (so
    ``--lips False`` silently became True); this converter parses the
    usual true/false spellings and rejects everything else.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('true', 't', 'yes', 'y', '1'):
        return True
    if lowered in ('false', 'f', 'no', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('expected a boolean, got %r' % value)


def parse_args(argv=None):
    """Parse command-line options for the GAN test run.

    Args:
        argv: optional explicit argument list; defaults to ``sys.argv[1:]``
            (backward compatible with the original zero-argument call).

    Returns:
        argparse.Namespace with all options below.
    """
    parser = argparse.ArgumentParser(description='Test GAN')
    # general
    parser.add_argument('--data_path', default='../../datasets/MT-Dataset', type=str, help='training and test data path')
    parser.add_argument('--dataset', default='MAKEUP', type=str)
    parser.add_argument('--gpus', default='0', type=str, help='GPU device to train with')
    # Defaults are real literals now; the original passed strings and relied
    # on argparse applying `type` to string defaults.
    parser.add_argument('--batch_size', default=1, type=int, help='batch_size')
    parser.add_argument('--vis_step', default=1260, type=int, help='steps between visualization')
    parser.add_argument('--task_name', default='', type=str, help='task name')
    parser.add_argument('--checkpoint', default='', type=str, help='checkpoint to load')
    parser.add_argument('--ndis', default=1, type=int, help='train discriminator steps')
    parser.add_argument('--LR', default=2e-4, type=float, help='Learning rate')
    parser.add_argument('--decay', default=0, type=int, help='epochs number for training')
    parser.add_argument('--model', default='makeupGAN', type=str, help='which model to use: cycleGAN/ makeupGAN')
    parser.add_argument('--epochs', default=300, type=int, help='nums of epochs')
    parser.add_argument('--whichG', default='branch', type=str, help='which Generator to choose, normal/branch, branch means two input branches')
    parser.add_argument('--norm', default='SN', type=str, help='normalization of discriminator, SN means spectrum normalization, none means no normalization')
    parser.add_argument('--d_repeat', default=3, type=int, help='the repeat Res-block in discriminator')
    parser.add_argument('--g_repeat', default=6, type=int, help='the repeat Res-block in Generator')
    parser.add_argument('--lambda_cls', default=1.0, type=float, help='the lambda_cls weight')
    parser.add_argument('--lambda_rec', default=10, type=int, help='lambda_A and lambda_B')
    parser.add_argument('--lambda_his', default=1.0, type=float, help='histogram loss on lips')
    parser.add_argument('--lambda_skin_1', default=0.1, type=float, help='histogram loss on skin equals to lambda_his* lambda_skin')
    parser.add_argument('--lambda_skin_2', default=0.1, type=float, help='histogram loss on skin equals to lambda_his* lambda_skin')
    parser.add_argument('--lambda_eye', default=1.0, type=float, help='histogram loss on eyes equals to lambda_his*lambda_eye')
    parser.add_argument('--content_layer', default='r41', type=str, help='vgg layer we use to output features')
    parser.add_argument('--lambda_vgg', default=5e-3, type=float, help='the param of vgg loss')
    parser.add_argument('--cls_list', default='N,M', type=str, help='the classes of makeup to train')
    parser.add_argument('--direct', type=_str2bool, default=True, help='direct means to add local cosmetic loss at the first, unified training')
    parser.add_argument('--lips', type=_str2bool, default=True, help='whether to finetune lips color')
    parser.add_argument('--skin', type=_str2bool, default=True, help='whether to finetune foundation color')
    parser.add_argument('--eye', type=_str2bool, default=True, help='whether to finetune eye shadow color')
    args = parser.parse_args(argv)
    return args


def save_result(img, img_name, index):
    """Save one transferred image under both naming orders.

    ``img_name`` is '<src>&<ref>'; the same image is written once as
    '<src>&<ref>&NNN.png' (non-makeup-first dir) and once as
    '<ref>&<src>&NNN.png' (makeup-first dir). ``img`` is assumed to be in
    [-1, 1] and is rescaled to [0, 1] before saving.
    """
    source_id, reference_id = img_name.strip().split('&')
    forward_name = '%s&%s&%03d.png' % (source_id, reference_id, index)
    backward_name = '%s&%s&%03d.png' % (reference_id, source_id, index)
    normalized = (img + 1) / 2  # [-1, 1] -> [0, 1]
    save_image(normalized, os.path.join(nonmakeup_first_dir, forward_name))
    save_image(normalized, os.path.join(makeup_first_dir, backward_name))


def test_net():
    """Run makeup transfer over every fixed test pair and save the results.

    Reads the module-global ``config``/``dataset_config`` (populated in the
    ``__main__`` block) and writes images via ``save_result``. Requires a
    CUDA device (tensors are moved with ``.cuda()``).
    """
    # enable cudnn autotuner (input sizes are fixed, so this is a win)
    cudnn.benchmark = True
    data_loaders = get_loader(config, mode="train")    # return train&test
    solver = Solver_makeupGAN(data_loaders, config, dataset_config)
    dataset = TestFixed_Dataset(test_name='test_0412.txt')
    dataloader = DataLoader(dataset, num_workers=2, batch_size=1, shuffle=False)
    # Loop variable renamed from `data`: the original shadowed the
    # file-level `import torch.utils.data as data` module.
    for i, batch in tqdm.tqdm(enumerate(dataloader)):
        makeup_img = batch['makeup_img'].cuda()
        nonmakeup_img = batch['nonmakeup_img'].cuda()
        result = solver.test(nonmakeup_img, makeup_img)
        img_name = batch["img_name"][0]
        save_result(result, img_name, i)


if __name__ == '__main__':
    args = parse_args()
    # Fold CLI arguments into the shared config object used by test_net().
    config = merge_cfg_arg(config, args)
    # config.checkpoint = '200_1260'
    config.checkpoint = '300_2520'

    dataset_config.name = args.dataset
    # Echo the effective configuration before running.
    print('           ⊱ ──────ஓ๑♡๑ஓ ────── ⊰')
    print('🎵 hhey, arguments are here if you need to check 🎵')
    for key in vars(config):
        print(f'{str(key):>15}: {str(getattr(config, key)):>30}')
    print()
    # Bail out early when the dataset directory is missing.
    if not os.path.exists(config.data_path):
        print("No datapath!!", config.data_path)
        exit()

    test_net()
