#!/usr/bin/env python
# -*- encoding: utf-8 -*-

"""
@Author  :   Peike Li
@Contact :   peike.li@yahoo.com
@File    :   datasets.py
@Time    :   8/4/19 3:35 PM
@Desc    :
@License :   This source code is licensed under the license found in the
             LICENSE file in the root directory of this source tree.
"""

import os
import numpy as np
import sys
sys.path.append(os.getcwd())
import random
import torch
import cv2
from torch.utils import data

from utils.transforms import get_affine_transform
class FaceDateset_CELEB(data.Dataset):
    """CelebAMask-HQ face-parsing dataset.

    Loads (image, label) pairs from ``<root>/train_img`` + ``<root>/train_label``
    (or the ``val_*`` directories when ``val=True``), applies a random
    scale/rotation crop to ``crop_size`` and returns ``(image, label, meta)``.

    Args:
        root: dataset root directory.
        dataset: split name (stored as-is; not used for path construction here).
        crop_size: output (height, width) of the affine crop.
        transform: optional callable applied to the warped image (e.g. ToTensor).
        val: when True, read from the validation directories.
    """

    def __init__(self, root, dataset, crop_size=(128, 128), transform=None, val=False):
        # Fix: the original called super(FaceDateset_CELEB).__init__(), which
        # builds an unbound super object and never runs Dataset.__init__.
        super().__init__()
        self.root = root
        self.aspect_ratio = crop_size[1] * 1.0 / crop_size[0]
        self.crop_size = np.asarray(crop_size)
        self.dataset = dataset
        self.scale_factor = 0.25    # scale jitter: s *= clip(N(1, sf), 1-sf, 1+sf)
        self.rotation_factor = 30   # rotation jitter in degrees (clipped to +/- 2*rf)
        split = 'val' if val else 'train'
        self.train_path = os.path.join(root, split + '_img/')
        self.label_path = os.path.join(root, split + '_label/')
        self.train_list = list(os.listdir(self.train_path))
        self.transform = transform
        self.number_samples = len(self.train_list)

    def __len__(self):
        return self.number_samples

    def _box2cs(self, box):
        """Convert an (x, y, w, h) box into a (center, scale) pair."""
        x, y, w, h = box[:4]
        return self._xywh2cs(x, y, w, h)

    def _xywh2cs(self, x, y, w, h):
        """Return box center and a scale padded to match ``self.aspect_ratio``."""
        center = np.zeros((2), dtype=np.float32)
        center[0] = x + w * 0.5
        center[1] = y + h * 0.5
        # Grow the shorter side so that w / h == aspect_ratio.
        if w > self.aspect_ratio * h:
            h = w * 1.0 / self.aspect_ratio
        elif w < self.aspect_ratio * h:
            w = h * self.aspect_ratio
        scale = np.array([w * 1.0, h * 1.0], dtype=np.float32)
        return center, scale

    def _preprocess(self, img, parsing_anno):
        """Apply random scale/rotation augmentation and warp both image and label.

        Returns the warped image (bilinear, black border) and the warped label
        map (nearest-neighbour, border filled with 255 as an ignore index).
        """
        sf = self.scale_factor
        rf = self.rotation_factor
        h, w, _ = img.shape
        person_center, s = self._box2cs([0, 0, w - 1, h - 1])
        s = s * np.clip(np.random.randn() * sf + 1, 1 - sf, 1 + sf)
        # Rotate ~60% of the time; otherwise keep the crop axis-aligned.
        r = np.clip(np.random.randn() * rf, -rf * 2, rf * 2) if random.random() <= 0.6 else 0
        trans = get_affine_transform(person_center, s, r, self.crop_size)
        warped_img = cv2.warpAffine(
            img,
            trans,
            (int(self.crop_size[1]), int(self.crop_size[0])),
            flags=cv2.INTER_LINEAR,
            borderMode=cv2.BORDER_CONSTANT,
            borderValue=(0, 0, 0))
        label_parsing = cv2.warpAffine(
            parsing_anno,
            trans,
            (int(self.crop_size[1]), int(self.crop_size[0])),
            flags=cv2.INTER_NEAREST,
            borderMode=cv2.BORDER_CONSTANT,
            borderValue=(255))
        return warped_img, label_parsing

    def __getitem__(self, index):
        """Return ``(image, label, meta)`` for sample ``index``.

        ``meta`` (file name + original size) was referenced but never defined
        in the original code, so every call raised NameError; it is now a dict.
        """
        train_item = self.train_list[index]
        im_path = os.path.join(self.train_path, train_item)
        # Swap only the file extension; the original replaced every occurrence
        # of 'jpg' in the full path, which breaks if a directory contains 'jpg'.
        parsing_anno_path = os.path.join(
            self.label_path, os.path.splitext(train_item)[0] + '.png')
        im = cv2.imread(im_path, cv2.IMREAD_COLOR)
        if im is None:
            raise IOError('cannot read image: %s' % im_path)
        parsing_anno = cv2.imread(parsing_anno_path, cv2.IMREAD_GRAYSCALE)
        if parsing_anno is None:
            raise IOError('cannot read label: %s' % parsing_anno_path)
        h, w, _ = im.shape

        # Make the label grid match the image before the shared affine warp.
        parsing_anno = cv2.resize(parsing_anno, (w, h), interpolation=cv2.INTER_NEAREST)

        image, parsing_anno = self._preprocess(im, parsing_anno)
        if self.transform:
            image = self.transform(image)
        parsing_anno = torch.from_numpy(parsing_anno)
        meta = {'name': train_item, 'height': h, 'width': w}
        return image, parsing_anno, meta

def vis_parsing_maps(im, parsing_anno, save_im=False, save_path='', im_name='1.png'):
    """Overlay a colorized parsing annotation on top of an image.

    Args:
        im: RGB image (H x W x 3); anything ``np.array`` accepts.
        parsing_anno: integer label map (H x W); numpy array or torch tensor.
            Label 255 is folded into background (0).
        save_im: when True, write the raw annotation to ``<save_path>/anno/``
            and the blended overlay to ``<save_path>/weights_img/``.
        save_path: output directory root (its ``anno``/``weights_img``
            subdirectories must already exist).
        im_name: output file name; its last 4 chars are assumed to be the
            extension when naming the annotation PNG.
    """
    # Fixed palette: colors for up to 24 classes (class 0 stays background).
    part_colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0],
                   [255, 0, 85], [255, 0, 170],
                   [0, 255, 0], [85, 255, 0], [170, 255, 0],
                   [0, 255, 85], [0, 255, 170],
                   [0, 0, 255], [85, 0, 255], [170, 0, 255],
                   [0, 85, 255], [0, 170, 255],
                   [255, 255, 0], [255, 255, 85], [255, 255, 170],
                   [255, 0, 255], [255, 85, 255], [255, 170, 255],
                   [0, 255, 255], [85, 255, 255], [170, 255, 255]]

    im = np.array(im)
    vis_im = im.copy().astype(np.uint8)
    # np.array(...) accepts torch tensors too; the original .copy() failed on
    # the tensor labels that FaceDateset_CELEB.__getitem__ returns.
    vis_parsing_anno = np.array(parsing_anno).astype(np.uint8)
    # Fold the 255 "ignore" label into background before colorizing.
    vis_parsing_anno[vis_parsing_anno == 255] = 0
    vis_parsing_anno_color = np.zeros((vis_parsing_anno.shape[0], vis_parsing_anno.shape[1], 3)) + 255

    num_of_class = np.max(vis_parsing_anno)

    # Paint each present class with its palette color (0 = background kept white).
    for pi in range(1, num_of_class + 1):
        index = np.where(vis_parsing_anno == pi)
        vis_parsing_anno_color[index[0], index[1], :] = part_colors[pi]

    vis_parsing_anno_color = vis_parsing_anno_color.astype(np.uint8)
    # Blend 40% image + 60% color mask; image assumed RGB, converted for cv2.
    vis_im = cv2.addWeighted(cv2.cvtColor(vis_im, cv2.COLOR_RGB2BGR), 0.4, vis_parsing_anno_color, 0.6, 0)

    if save_im:
        cv2.imwrite(save_path + '/anno/' + im_name[:-4] + '.png', vis_parsing_anno)
        cv2.imwrite(save_path + '/weights_img/' + im_name, vis_im, [int(cv2.IMWRITE_JPEG_QUALITY), 100])

if __name__ == '__main__':
    # Smoke test: walk the validation split and dump colorized overlays.
    dataset = FaceDateset_CELEB('./data/CelebAMask-HQ', "face_dataset", val=True)
    for idx, file_name in enumerate(dataset.train_list):
        img, parsing_anno, meta = dataset[idx]

        print(parsing_anno.shape, img.shape)
        print(np.unique(parsing_anno))

        vis_parsing_maps(img, parsing_anno, save_im=True, save_path='./data_process/', im_name=file_name[-12:])