import cv2
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
import torch
from LMdata.LMdataset import cate2idx, Skdict, kp2idx,idx2cate,idx2kp,kpInCates
import os
import pandas as pd
from sklearn.model_selection import train_test_split
import hashlib

# Maps attribute-classification label columns to the landmark dataset's
# garment category names; only these two attribute keys have a
# corresponding keypoint category.
attr_to_cate = {
    'pant_length_labels': 'trousers',
    'skirt_length_labels': 'skirt',
}


def add_fake_kp(df, idx2kp_map):
    """Fill every keypoint column named in *idx2kp_map* with the
    placeholder label '1_1_1' (x=1, y=1, visible).

    Mutates *df* in place and returns it.
    """
    for kp_name in idx2kp_map.values():
        df[kp_name] = '1_1_1'
    return df


def filt_valid_kp(df):
    """Mask keypoints that do not belong to a row's garment category.

    For each category present in *df*, every keypoint column whose index
    is not listed in kpInCates for that category is overwritten with the
    missing marker '-1_-1_-1'. Mutates *df* in place and returns it.
    """
    # keypoint columns are everything except the two id columns;
    # this set is loop-invariant, so compute it once
    kp_cols = [c for c in df.columns if c not in ('image_id', 'image_category')]

    for cate in df['image_category'].unique():
        valid_kps = kpInCates[cate2idx[cate]]
        row_mask = df['image_category'] == cate
        for col in kp_cols:
            if kp2idx[col] not in valid_kps:
                df.loc[row_mask, col] = '-1_-1_-1'
    return df

def get_label_from_df(df):
    """Parse the 'x_y_v' keypoint strings of *df* into an integer array.

    :param df: DataFrame with one column per keypoint name (idx2kp),
               each cell formatted as 'x_y_v'.
    :return: (n_rows, 24, 3) int np.array where [..., 0] is x and
             [..., 1] is y (values in [-1, 512] per the original notes)
             and [..., 2] is visibility in {-1, 0, 1}.
    """
    # np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int gives the same platform default integer dtype.
    true_xyv = np.zeros((df.shape[0], 24, 3), dtype=int)
    for i in range(24):
        # split each 'x_y_v' cell once instead of three times
        parts = df[idx2kp[i]].str.split('_', expand=True).astype(int)
        true_xyv[:, i, 0] = parts[0]  # x coord
        true_xyv[:, i, 1] = parts[1]  # y coord
        true_xyv[:, i, 2] = parts[2]  # visibility

    return true_xyv


def join_path_to_df(df, *path_to_join):
    """Prefix every 'image_id' in *df* with os.path.join(*path_to_join).

    Mutates *df* in place and returns it.
    """
    prefix = os.path.join(*path_to_join)
    df['image_id'] = [os.path.join(prefix, name) for name in df['image_id']]
    return df

def get_train_val(root_path):
    """Build train/val annotation DataFrames for the landmark task.

    Round-1 train + warm-up data plus 70% of test_a/test_b form the
    training set; the remaining 30% of test_a/test_b form validation.
    Validation rows whose image content (md5) also appears in the
    training set are dropped to avoid leakage.

    :param root_path: dataset root containing
                      round1/{warm_up,train,test_a,test_b}
    :return: (train_pd, val_pd_filtered) DataFrames
    """

    def _file_md5(path):
        # The original lambdas used open(path).read() without closing,
        # leaking one file handle per image until GC ran.
        with open(path, 'rb') as f:
            return hashlib.md5(f.read()).hexdigest()

    warmup_path = os.path.join(root_path, 'round1/warm_up')
    round_1_train_path = os.path.join(root_path, 'round1/train')
    test_a_path = os.path.join(root_path, 'round1/test_a')
    test_b_path = os.path.join(root_path, 'round1/test_b')

    test_a_csv = os.path.join(test_a_path, 'fashionAI_key_points_test_a_answer_20180426.csv')
    test_b_csv = os.path.join(test_b_path, 'fashionAI_key_points_test_b_answer_20180426.csv')
    round_1_train_csv = os.path.join(round_1_train_path, 'Annotations/train.csv')
    warmup_train_csv = os.path.join(warmup_path, 'Annotations/annotations.csv')

    anno_test_a = pd.read_csv(test_a_csv)
    anno_test_b = pd.read_csv(test_b_csv)
    anno_round_1 = pd.read_csv(round_1_train_csv)
    anno_warmup = pd.read_csv(warmup_train_csv)

    # make image_id an absolute path relative to each subset's root
    anno_test_a["image_id"] = anno_test_a["image_id"].apply(lambda x: os.path.join(test_a_path, x))
    anno_test_b["image_id"] = anno_test_b["image_id"].apply(lambda x: os.path.join(test_b_path, x))
    anno_round_1["image_id"] = anno_round_1["image_id"].apply(lambda x: os.path.join(round_1_train_path, x))
    anno_warmup["image_id"] = anno_warmup["image_id"].apply(lambda x: os.path.join(warmup_path, x))

    # stratify on category so the split is balanced per garment type
    train_test_a, val_test_a = train_test_split(anno_test_a, test_size=0.3,
                                                stratify=anno_test_a["image_category"], random_state=42)
    train_test_b, val_test_b = train_test_split(anno_test_b, test_size=0.3,
                                                stratify=anno_test_b["image_category"], random_state=42)

    # DataFrame.append was deprecated (pandas 1.4) and removed (2.0);
    # pd.concat with default arguments is the exact equivalent.
    train_pd = pd.concat([anno_warmup, train_test_a, train_test_b, anno_round_1])
    val_pd = pd.concat([val_test_a, val_test_b])

    # de-duplicate by image content: drop validation images whose md5
    # also occurs in training
    train_pd_hash = set(train_pd["image_id"].apply(_file_md5))
    val_index = [_file_md5(p) not in train_pd_hash for p in val_pd["image_id"]]
    val_pd_filtered = val_pd[val_index]

    return train_pd, val_pd_filtered

def join_path_to_df_attr(df, *path_to_join):
    """Prefix every 'ImageName' in *df* with os.path.join(*path_to_join).

    Mutates *df* in place and returns it.
    """
    base = os.path.join(*path_to_join)
    df['ImageName'] = df['ImageName'].map(lambda name: os.path.join(base, name))
    return df


def get_train_val_attr(rawdata_root):
    """Assemble attribute-classification train/val splits.

    Round-2 train data is split 90/10 (stratified on AttrKey); round-1
    base labels and the extra skirt-length web data are added to the
    training portion only.

    :param rawdata_root: root directory of the FashionAI attribute data
    :return: (train_pd, val_pd) DataFrames with columns
             ImageName / AttrKey / AttrValues
    """
    cols = ['ImageName', 'AttrKey', 'AttrValues']

    def _load(label_rel, image_rel):
        # each label file is header-less CSV: image path, attr key, value
        frame = pd.read_csv(os.path.join(rawdata_root, label_rel),
                            header=None, names=cols)
        return join_path_to_df_attr(frame, rawdata_root, image_rel)

    round1_df = _load('round1/base/Annotations/label.csv', 'round1/base')
    round2_df = _load('round2/train/Annotations/label.csv', 'round2/train')
    extra_df = _load('round2/round2_data_add_skirt_legth.txt', 'round1/web')

    round2_train_pd, val_pd = train_test_split(round2_df, test_size=0.1,
                                               random_state=37,
                                               stratify=round2_df['AttrKey'])

    train_pd = pd.concat([round2_train_pd, round1_df, extra_df],
                         axis=0, ignore_index=True)
    # redundant after ignore_index=True, kept to match original behavior
    train_pd.index = range(train_pd.shape[0])

    return train_pd, val_pd





def batch_postprocess(hms, ori_sizes, do_blur):
    '''
    Resize a batch of heatmaps back to each image's original size,
    optionally Gaussian-blur them, then locate the peak of every
    keypoint channel.

    :param hms:       (bs, 24, h, w) float np.array in [0, 1]
    :param ori_sizes: (bs, 2) int np.array of (H, W) per image
    :param do_blur:   bool; apply a 39x39 Gaussian blur before the argmax
    :return: (bs, 24, 2) np.array of (x, y) peak coordinates
    '''

    batch_peaks = np.zeros((hms.shape[0], hms.shape[1], 2))
    C = hms.shape[1]  # number of keypoint channels (loop-invariant)

    # loop over images in the batch
    for i in range(hms.shape[0]):

        H, W = ori_sizes[i]
        # cv2 expects channels-last data and dsize=(width, height)
        hms_resz = cv2.resize(hms[i].transpose((1, 2, 0)), dsize=(W, H),
                              interpolation=cv2.INTER_LINEAR)
        # (H, W, 24)

        if do_blur:
            # smooth each channel so the argmax is less noise-sensitive
            hms_resz = cv2.GaussianBlur(hms_resz, ksize=(39, 39), sigmaX=7)

        # flatten each channel and take its argmax
        peaks_idxs = hms_resz.transpose((2, 0, 1)).reshape(C, -1).argmax(1)  # (C, )
        # The 'dims=' keyword was deprecated in NumPy 1.16 (renamed to
        # 'shape=') and later removed; pass the shape positionally so the
        # call works on every NumPy version.
        idx_y, idx_x = np.unravel_index(peaks_idxs, (H, W))   # (C,), (C,)

        batch_peaks[i, :, 0] = idx_x
        batch_peaks[i, :, 1] = idx_y

    return batch_peaks


def attrpd_to_lmpd(attrpd, template, attr_to_cate):
    """Convert an attribute-label DataFrame into a landmark-style one.

    Rows whose AttrKey is not a key of *attr_to_cate* are dropped; the
    survivors get the mapped image_category, all keypoint columns from
    *template* are filled with the '0_0_1' placeholder, and keypoints
    invalid for each category are then masked via filt_valid_kp.

    :param attrpd: DataFrame with 3 columns (image path, attr key, value);
                   its columns are renamed in place.
    :param template: empty DataFrame carrying the landmark column layout
    :param attr_to_cate: mapping of attr key -> landmark image_category
    :return: DataFrame with exactly template's columns
    """
    attrpd.columns = ['image_id', 'AttrKey', 'AttrValues']
    # Keep only attributes that map to a landmark category. The original
    # built a throwaway boolean 'select' column (via the non-idiomatic
    # `True if x in d else False`) and left it on the caller's frame.
    attrpd = attrpd[attrpd['AttrKey'].isin(list(attr_to_cate))].copy()
    attrpd.index = range(attrpd.shape[0])

    attrpd['image_category'] = attrpd['AttrKey'].map(attr_to_cate)
    attrpd = attrpd[['image_id', 'image_category']]

    # concat with the empty template to pick up the keypoint columns,
    # then fill them with the neutral '0_0_1' placeholder
    attrpd = pd.concat([template, attrpd], axis=0)
    attrpd = attrpd.fillna('0_0_1')
    attrpd = filt_valid_kp(attrpd)
    return attrpd[template.columns]


if __name__ == '__main__':

    # build the attribute-classification splits
    rawdata_root = "/media/hszc/data1/FaishionAI"
    attr_train_pd, attr_val_pd = get_train_val_attr(rawdata_root)

    # build the landmark splits; an empty slice of the train frame keeps
    # the landmark column layout as a template
    rawdata_root = "/media/hszc/data1/LandMarks"
    train_pd, val_pd = get_train_val(rawdata_root)
    template = train_pd[0:0]

    attr_train_pd = attrpd_to_lmpd(attr_train_pd, template, attr_to_cate)
    attr_val_pd = attrpd_to_lmpd(attr_val_pd, template, attr_to_cate)
    # DataFrame.info() prints to stdout itself and returns None; the
    # original Python-2-only `print df.info()` was a SyntaxError under
    # Python 3 and emitted a stray "None" under Python 2.
    attr_train_pd.info()
    attr_val_pd.info()

    if not os.path.exists('../attr_to_crop'):
        os.makedirs('../attr_to_crop')

    attr_train_pd.to_csv('../attr_to_crop/attr_train_to_crop.csv', index=False)
    attr_val_pd.to_csv('../attr_to_crop/attr_val_to_crop.csv', index=False)

