import cv2
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
import torch
from LMdata.LMdataset import cate2idx, Skdict, kp2idx,idx2cate,idx2kp,kpInCates
import os
import pandas as pd
from sklearn.model_selection import train_test_split
import hashlib

def get_1x_lr_params_NOscale(model):
    """
    Yield the trainable parameters of the backbone (everything under
    model.Scale except the final classification layer, layer5).

    Batchnorm parameters have requires_grad set to False elsewhere
    (deeplab_resnet.py), so the requires_grad check filters them out.

    NOTE: the original implementation iterated ``module.modules()`` and
    then each submodule's recursive ``parameters()``, which yielded the
    same parameter multiple times for container modules; here each
    parameter is yielded exactly once.

    :param model: network exposing a ResNet backbone under model.Scale
    :yield: torch.nn.Parameter objects with requires_grad=True
    """
    backbone = [
        model.Scale.conv1,
        model.Scale.bn1,
        model.Scale.layer1,
        model.Scale.layer2,
        model.Scale.layer3,
        model.Scale.layer4,
    ]
    for module in backbone:
        # Module.parameters() already recurses into submodules
        for param in module.parameters():
            if param.requires_grad:
                yield param

def get_10x_lr_params(model):
    """
    Yield every parameter of the last layer of the net (model.Scale.layer5),
    which does the classification of pixels into classes.

    :param model: network exposing the head under model.Scale.layer5
    :yield: torch.nn.Parameter objects of the classification head
    """
    for param in model.Scale.layer5.parameters():
        yield param


def gkern(im_size=512, l=29, sig=6.):
    """
    Create a normalized 2-D Gaussian kernel.

    The base side length ``l`` and standard deviation ``sig`` are defined
    relative to a 512-pixel reference image and scaled proportionally to
    ``im_size`` (previously hard-coded; the defaults reproduce the old
    behavior exactly).

    :param im_size: size of the image the kernel will be applied to
    :param l: kernel side length at the 512 reference size
    :param sig: Gaussian sigma at the 512 reference size
    :return: (l', l') float array that sums to 1, with l' odd
    """
    # scale side length with image size, keeping at least 1 pixel
    l = max(int(l * im_size / 512), 1)
    # force l to be odd so the kernel has a single centre pixel
    if not l % 2:
        l += 1
    sig = sig * im_size / 512

    ax = np.arange(-l // 2 + 1., l // 2 + 1.)
    xx, yy = np.meshgrid(ax, ax)

    kernel = np.exp(-(xx ** 2 + yy ** 2) / (2. * sig ** 2))

    # normalize so the kernel integrates to 1
    return kernel / np.sum(kernel)

def genGauKerVar(kernel, nums=24):
    '''
    Turn a 2-D Gaussian kernel into a depthwise conv weight on the GPU.

    :param kernel: numpy array, shape (H, W)
    :param nums: number of channels the kernel is replicated for
    :return: (Variable of shape (nums, 1, H, W) on CUDA, padding int)

    Apply with
        out = F.conv2d(input=img_var, weight=kernel_var, groups=nums, padding=pad)
    to blur an image or mask channel-wise on the GPU.
    '''
    side = kernel.shape[0]
    # (H, W) -> (1, 1, H, W) -> (nums, 1, H, W), one copy per channel
    stacked = np.repeat(kernel[np.newaxis, np.newaxis], nums, axis=0)

    # 'same' padding for an odd-sized kernel: (side - 1) // 2
    pad = (side - 1) // 2

    weight = Variable(torch.from_numpy(stacked)).float().cuda()
    return weight, pad

def drop_shitdata(df):
    """
    Drop corrupted annotation rows from *df* in place.

    A row is considered corrupted when, for its category, the two
    skeleton key-points (Skdict) carry an identical 'x_y_v' annotation
    string: two distinct key-points should never coincide exactly.

    :param df: annotations DataFrame with 'image_category' and
               per-keypoint 'x_y_v' columns
    :return: the same DataFrame with the bad rows removed
    """
    for cate in cate2idx:
        cate_idx = cate2idx[cate]
        # names of the two skeleton key-points for this category
        kp1 = idx2kp[Skdict[cate_idx][0]]
        kp2 = idx2kp[Skdict[cate_idx][1]]

        drop_idx = df[(df['image_category'] == cate) & (df[kp1] == df[kp2])].index
        df.drop(drop_idx, axis=0, inplace=True)
        # parenthesized print works under both Python 2 and Python 3
        print('%s : drop shit data: %d' % (cate, len(drop_idx)))
    return df

def add_fake_kp(df, idx2kp_map):
    """
    Add one placeholder column per key-point name in *idx2kp_map*,
    filled with the dummy annotation string '1_1_1'.

    :param df: DataFrame to extend in place
    :param idx2kp_map: mapping of key-point index -> column name
    :return: the same DataFrame
    """
    for kp_name in idx2kp_map.values():
        df[kp_name] = '1_1_1'
    return df

def filt_valid_kp(df):
    """
    Invalidate key-point columns that do not belong to a row's category.

    For each category present in *df*, every key-point column whose index
    is not listed in kpInCates for that category is overwritten with the
    'not annotated' marker '-1_-1_-1'.

    :param df: annotations DataFrame with 'image_id', 'image_category'
               and per-keypoint 'x_y_v' columns
    :return: the same DataFrame
    """
    for cate in df['image_category'].unique():
        cate_idx = cate2idx[cate]
        for col in df.drop(['image_id', 'image_category'], axis=1).columns:
            kp_idx = kp2idx[col]
            if kp_idx not in kpInCates[cate_idx]:
                # same "cate col" output as the old Python 2 `print cate,col`
                print('%s %s' % (cate, col))
                df.loc[df['image_category'] == cate, col] = '-1_-1_-1'

    return df

def get_label_from_df(df):
    """
    Parse the 'x_y_v' annotation strings into a numeric label array.

    :param df: DataFrame with 24 key-point columns named by idx2kp,
               each cell an underscore-joined 'x_y_v' string
    :return: int array of shape (n_rows, 24, 3); the last axis holds
             (x coord [-1, 512], y coord [-1, 512], visibility [-1, 0, 1])
    """
    # plain `int`: the np.int alias was removed in NumPy >= 1.24
    true_xyv = np.zeros((df.shape[0], 24, 3), dtype=int)
    for i in range(24):
        # split each column once instead of three times
        parts = df[idx2kp[i]].str.split('_')
        true_xyv[:, i, 0] = parts.str[0].astype(int)  # x coord [-1,512]
        true_xyv[:, i, 1] = parts.str[1].astype(int)  # y coord [-1,512]
        true_xyv[:, i, 2] = parts.str[2].astype(int)  # vis  [-1, 0, 1]

    return true_xyv


def join_path_to_df(df, *path_to_join):
    """
    Prefix every 'image_id' in *df* with a path built from the given
    components.

    :param df: DataFrame with an 'image_id' column of relative paths
    :param path_to_join: path components forwarded to os.path.join
    :return: the same DataFrame with 'image_id' rewritten in place
    """
    prefix = os.path.join(*path_to_join)
    df['image_id'] = df['image_id'].apply(lambda rel: os.path.join(prefix, rel))
    return df

def get_train_val(root_path):
    """
    Build train/validation annotation DataFrames from the FashionAI
    round-1 data layout under *root_path*.

    Train = warm-up + round-1 train + 70% of test_a + 70% of test_b;
    validation = the remaining 30% of test_a and test_b. Any validation
    image whose MD5 also appears among the training images is dropped to
    avoid train/val leakage.

    :param root_path: dataset root containing round1/{warm_up,train,test_a,test_b}
    :return: (train_df, filtered_val_df) with 'image_id' holding full paths
    """
    warmup_path = os.path.join(root_path, 'round1/warm_up')
    round_1_train_path = os.path.join(root_path, 'round1/train')
    test_a_path = os.path.join(root_path, 'round1/test_a')
    test_b_path = os.path.join(root_path, 'round1/test_b')

    test_a_csv = os.path.join(test_a_path, 'fashionAI_key_points_test_a_answer_20180426.csv')
    test_b_csv = os.path.join(test_b_path, 'fashionAI_key_points_test_b_answer_20180426.csv')
    round_1_train_csv = os.path.join(round_1_train_path, 'Annotations/train.csv')
    warmup_train_csv = os.path.join(warmup_path, 'Annotations/annotations.csv')

    anno_test_a = pd.read_csv(test_a_csv)
    anno_test_b = pd.read_csv(test_b_csv)
    anno_round_1 = pd.read_csv(round_1_train_csv)
    anno_warmup = pd.read_csv(warmup_train_csv)

    # make image paths absolute relative to each dataset directory
    anno_test_a["image_id"] = anno_test_a["image_id"].apply(lambda x: os.path.join(test_a_path, x))
    anno_test_b["image_id"] = anno_test_b["image_id"].apply(lambda x: os.path.join(test_b_path, x))
    anno_round_1["image_id"] = anno_round_1["image_id"].apply(lambda x: os.path.join(round_1_train_path, x))
    anno_warmup["image_id"] = anno_warmup["image_id"].apply(lambda x: os.path.join(warmup_path, x))

    # fixed random_state keeps the split reproducible across runs
    train_test_a, val_test_a = train_test_split(anno_test_a, test_size=0.3, stratify=anno_test_a["image_category"], random_state=42)
    train_test_b, val_test_b = train_test_split(anno_test_b, test_size=0.3, stratify=anno_test_b["image_category"], random_state=42)

    # pd.concat: DataFrame.append was deprecated and removed in pandas 2.0
    train_pd = pd.concat([anno_warmup, train_test_a, train_test_b, anno_round_1])
    val_pd = pd.concat([val_test_a, val_test_b])

    def _md5(path):
        # context manager closes the file handle (the old open().read() leaked it)
        with open(path, "rb") as f:
            return hashlib.md5(f.read()).hexdigest()

    # drop validation images whose content duplicates a training image
    train_pd_hash = set(train_pd["image_id"].apply(_md5))
    val_pd_hash = val_pd["image_id"].apply(_md5)
    val_index = [h not in train_pd_hash for h in val_pd_hash]
    val_pd_filtered = val_pd[val_index]

    return train_pd, val_pd_filtered




def batch_postprocess(hms, ori_sizes, do_blur):
    '''
    Resize a batch of heatmaps to each image's original size, optionally
    Gaussian-blur them, then locate the peak of every key-point channel.

    :param hms:   (bs, 24, h, w) float np.array  0~1
    :param ori_sizes:   (bs, 2)   H, W  int np.array
    :param do_blur: if True, smooth each resized heatmap before the argmax
    :return: (bs, 24, 2)    x,y  int np.array
    '''

    batch_peaks = np.zeros((hms.shape[0], hms.shape[1], 2))
    C = hms.shape[1]

    # loop along imgs
    for i in range(hms.shape[0]):

        # resize to the image's original resolution; cv2 wants (W, H)
        H, W = ori_sizes[i]
        hms_resz = cv2.resize(hms[i].transpose((1, 2, 0)), dsize=(W, H),
                              interpolation=cv2.INTER_LINEAR)
        # (H,W,24)

        if do_blur:
            hms_resz = cv2.GaussianBlur(hms_resz, ksize=(39, 39), sigmaX=7)

        # find peak points in each key-point channel
        peaks_idxs = hms_resz.transpose((2, 0, 1)).reshape(C, -1).argmax(1)  # (C, )
        # `shape=` replaces the `dims=` keyword, which NumPy deprecated in
        # 1.16 and later removed from np.unravel_index
        idx_y, idx_x = np.unravel_index(peaks_idxs, shape=(H, W))  # (C,), (C,)

        batch_peaks[i, :, 0] = idx_x
        batch_peaks[i, :, 1] = idx_y

    return batch_peaks






# if __name__ == '__main__':
#     from matplotlib import pyplot as plt
#     import os
#     import cv2
#
#     im_size = 100
#     label = np.zeros((1,24,im_size,im_size),dtype=np.float)
#     label[0,0,im_size//4,im_size//4] = 1
#     label[0,1,im_size//2,im_size//2] = 1
#
#
#
#     kernel = gkern(im_size)
#     print kernel.shape
#     kernel_var,pad = genGauKerVar(kernel, nums=24)
#
#
#
#     os.environ["CUDA_VISIBLE_DEVICES"] = "0"
#
#
#     lb_ts = Variable(torch.from_numpy(label)).float().cuda()
#
#
#     out = F.conv2d(input=lb_ts, weight= kernel_var, groups=24, padding=pad)
#     print out.size()
#     im1 = out[0,0].data.cpu().numpy()
#     im2 = out[0,1].data.cpu().numpy()
#
#     im1_resize = cv2.resize(im1,(512,512))
#     im2_resize = cv2.resize(im2, (512, 512))
#
#     plt.subplot(221)
#     plt.imshow(im1)
#     plt.subplot(222)
#     plt.imshow(im2)
#     plt.subplot(223)
#     plt.imshow(im1_resize)
#     plt.subplot(224)
#     plt.imshow(im2_resize)
#     plt.show()
