# coding=utf8

from __future__ import division
import os
import torch
import torch.utils.data as data
import numpy as np
import pandas as pd
import cv2
from collections import OrderedDict
# Disable OpenCV's internal threading — NOTE(review): presumably to avoid
# oversubscription when this module is used inside DataLoader worker
# processes; confirm against the training setup.
cv2.setNumThreads(0)
# Clothing category names in canonical order; a category's index is its
# position in this list.
cate2idx = OrderedDict(
    (name, i)
    for i, name in enumerate(['blouse', 'dress', 'outwear', 'skirt', 'trousers'])
)
# Reverse lookup: category index -> category name.
idx2cate = OrderedDict((i, name) for name, i in cate2idx.items())

# Per-category reference keypoint pair: categories 0-2 map to keypoints
# [5, 6] (armpits), categories 3-4 map to [15, 16] (waistband).
# A fresh list is built per entry so values never alias each other.
Skdict = OrderedDict((c, [5, 6] if c < 3 else [15, 16]) for c in range(5))

# All 24 keypoint names in canonical order; a keypoint's index is its
# position in this list.
_KP_NAMES = [
    'neckline_left', 'neckline_right', 'center_front',
    'shoulder_left', 'shoulder_right',
    'armpit_left', 'armpit_right',
    'waistline_left', 'waistline_right',
    'cuff_left_in', 'cuff_left_out', 'cuff_right_in', 'cuff_right_out',
    'top_hem_left', 'top_hem_right',
    'waistband_left', 'waistband_right',
    'hemline_left', 'hemline_right',
    'crotch',
    'bottom_left_in', 'bottom_left_out', 'bottom_right_in', 'bottom_right_out',
]
kp2idx = OrderedDict((name, i) for i, name in enumerate(_KP_NAMES))

# Reverse lookup: keypoint index -> keypoint name.
idx2kp = OrderedDict((i, name) for name, i in kp2idx.items())

# Symmetric left/right partner for each paired keypoint; both directions
# are stored (a -> b and b -> a). Unpaired keypoints (2, 19) are absent.
kpPairs = OrderedDict()
for _a, _b in [(0, 1), (3, 4), (5, 6), (7, 8), (9, 11), (10, 12),
               (13, 14), (15, 16), (17, 18), (20, 21), (22, 23)]:
    kpPairs[_a] = _b
    kpPairs[_b] = _a

# Keypoint indices annotated for each category index (insertion order
# of the categories is intentional and preserved).
kpInCates = OrderedDict(
    [
        (0, [0, 1, 3, 4, 2] + [5, 6, 13, 14] + [9, 10, 11, 12]),
        (2, [0, 1, 3, 4, 5, 6, 7, 8] + [9, 10, 11, 12] + [13, 14]),
        (4, [15, 16, 19] + [20, 21, 22, 23]),
        (3, [15, 16] + [17, 18]),
        (1, [0, 1, 2, 3, 4] + [5, 6, 7, 8] + [9, 10, 11, 12] + [17, 18]),
    ]
)

class LMdata(data.Dataset):
    """Fashion-landmark dataset.

    Each sample is an (image, landmark-mask, visibility-mask, original-size,
    relative-landmarks, category-index) tuple produced from an annotation
    DataFrame whose keypoint columns hold ``'x_y_vis'`` strings.
    """

    def __init__(self, anno_pd, transforms):
        """
        Args:
            anno_pd: DataFrame with columns ``image_id`` (image file path),
                ``image_category`` (a key of ``cate2idx``) and one column per
                keypoint name (``idx2kp``) holding ``'x_y_vis'`` strings,
                where -1 marks a missing coordinate / untagged keypoint.
            transforms: callable ``(img, lm_mask, flm, ori_size) -> tuple``
                applied to every sample in ``__getitem__``.
        """
        anno_pd.index = range(anno_pd.shape[0])
        self.paths = anno_pd['image_id'].tolist()
        self.cates = anno_pd['image_category'].tolist()
        self.transforms = transforms

        # Parse the 'x_y_vis' annotation strings into an (N, 24, 3) int array.
        # np.int was removed in NumPy 1.24 — use an explicit np.int64 dtype.
        self.landmarks = np.zeros((anno_pd.shape[0], 24, 3), dtype=np.int64)
        for i in range(24):
            # Split each keypoint column once (the original split it three
            # times); expand=True yields integer-labeled columns 0, 1, 2.
            parts = anno_pd[idx2kp[i]].str.split('_', expand=True).astype(int)
            self.landmarks[:, i, 0] = parts[0]  # x coord [-1, 512]
            self.landmarks[:, i, 1] = parts[1]  # y coord [-1, 512]
            self.landmarks[:, i, 2] = parts[2]  # visibility [-1, 0, 1]
        self.num_kps = 24

    def __len__(self):
        """Return the number of annotated images."""
        return len(self.paths)

    def __getitem__(self, item):
        """Load, normalize and transform one sample.

        Returns:
            img (FloatTensor): transformed image, (3, H, W), RGB.
            lm_mask (FloatTensor): landmark mask from the transform
                pipeline — expected (24, H, W); TODO confirm against the
                transforms implementation.
            vis_mask (FloatTensor): (24,) — 1 where visibility == 1, else 0.
            ori_size: original [height, width] (possibly transformed).
            flm: (24, 3) float32 relative landmarks; -1 where untagged.
            cate_idx (int): category index of the sample.
        """
        # cv2 reads BGR; convert to RGB once on load.
        img = cv2.cvtColor(cv2.imread(self.paths[item]), cv2.COLOR_BGR2RGB)
        lm = self.landmarks[item]  # (24, 3) int: absolute x, y, visibility
        cate_idx = cate2idx[self.cates[item]]

        # Coordinates relative to image size in [0, 1]; keypoints whose
        # visibility flag is -1 (untagged) keep the -1 sentinel.
        flm = np.zeros(lm.shape, dtype=np.float32) - 1
        tagged = lm[:, 2] > -1  # hoisted: the same mask was built 4x before
        flm[tagged, 0] = lm[tagged, 0] / img.shape[1]  # x relative to width
        flm[tagged, 1] = lm[tagged, 1] / img.shape[0]  # y relative to height
        flm[:, 2] = lm[:, 2]

        ori_size = [img.shape[0], img.shape[1]]

        # Placeholder; the transform pipeline is responsible for building
        # the real landmark mask.
        lm_mask = np.zeros(1)
        img, lm_mask, flm, ori_size = self.transforms(img, lm_mask, flm, ori_size)

        # 1 only for keypoints explicitly marked visible (vis == 1).
        vis_mask = (lm[:, 2] > 0).astype(int)

        return torch.from_numpy(img).float(), \
               torch.from_numpy(lm_mask).float(), \
               torch.from_numpy(vis_mask).float(), \
               ori_size, \
               flm, \
               cate_idx


def collate_fn(batch):
    """Collate LMdata samples into batch tensors/arrays.

    Transposes the list of per-sample 6-tuples, stacks the tensor fields
    along a new batch dimension and converts the remaining fields to
    numpy arrays.
    """
    imgs, lm_masks, vis_masks, ori_sizes, flms, cate_idxs = zip(*batch)

    return torch.stack(imgs, 0), \
           torch.stack(lm_masks, 0), \
           torch.stack(vis_masks, 0), \
           np.array(ori_sizes), \
           np.array(flms), \
           np.array(cate_idxs)

