#coding=utf8
from __future__ import division
import time
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
import pandas as pd
from metrics import cal_mAP
from FSdata.FSdataset import attr2catidx_map, attr2idx_map, idx2attr_map
import logging
import cv2
import torch
import itertools

def predict(
        model,
        data_set,
        data_loader,
        whileTraing=False
):
    """Run a single-head model over data_loader and score its predictions.

    When whileTraing is True, logs metrics and returns (val_mAP, val_acc);
    otherwise prints metrics and returns
    (val_preds, val_labels_str, val_attr, val_true).
    """
    start = time.time()
    model.eval()

    n_samples = len(data_set)
    n_cats = data_set.catlen

    # Pre-allocated buffers, filled batch by batch.
    val_preds = np.zeros((n_samples, n_cats), dtype=np.float32)
    val_true = np.zeros((n_samples, n_cats), dtype=np.float32)
    val_attr = np.zeros(n_samples, dtype=np.int)
    val_attr_mask = np.zeros((n_samples, n_cats), dtype=np.int)
    val_labels_str = np.empty((n_samples, n_cats), dtype='|S1')

    offset = 0
    for batch_idx, batch in enumerate(data_loader):
        if not whileTraing:
            print('%d/%d' % (batch_idx, n_samples / data_loader.batch_size))
        inputs, attr, attr_mask, labels, labels_str = batch

        # volatile: inference only, no autograd graph (pre-0.4 torch API)
        inputs = Variable(inputs.cuda(), volatile=True)
        labels = Variable(labels.cuda(), volatile=True)
        attr_mask = Variable(attr_mask.cuda(), volatile=True)

        # forward
        outputs = model(inputs)

        # copy this batch's results into the buffers
        bs = labels.size(0)
        sl = slice(offset, offset + bs)
        val_preds[sl] = outputs.data.cpu().numpy()
        val_true[sl] = labels.data.cpu().numpy()
        val_attr[sl] = attr
        val_attr_mask[sl] = attr_mask.data.cpu().numpy()
        val_labels_str[sl] = labels_str
        offset += bs

    # top-1 restricted to the categories valid for each sample's attribute
    preds_y = (val_preds * val_attr_mask).argmax(1)
    labels_yonly = val_true.argmax(1)

    val_acc = (preds_y == labels_yonly).sum() * 1. / labels_yonly.shape[0]
    val_mAP, APs, accs = cal_mAP(val_labels_str, val_preds, val_attr, data_set.catidx_map)

    since = time.time() - start
    if whileTraing:
        logging.info('val-acc@1: %.4f ||val-mAP: %.4f ||time: %d'
                     % (val_acc, val_mAP, since))
        for key in APs.keys():
            logging.info('acc: %.4f, AP: %.4f %s' % (accs[key], APs[key], key))
        return val_mAP, val_acc
    else:
        print('val-acc@1: %.4f ||val-mAP: %.4f ||time: %d'
              % (val_acc, val_mAP, since))
        for key in APs.keys():
            print('acc: %.4f, AP: %.4f %s' % (accs[key], APs[key], key))
        return val_preds, val_labels_str, val_attr, val_true


def predict2(
        model,
        data_set,
        data_loader,
        whileTraing=False
):
    """Evaluate a two-head model; also scores the average of both heads.

    When whileTraing is True, logs all three sets of metrics and returns
    the stage-1 pair (val_mAP1, val_acc1); otherwise prints the averaged
    metrics and returns (val_preds_avg, val_labels_str, val_attr, val_true).
    """
    start = time.time()
    model.eval()

    n_samples = len(data_set)
    n_cats = data_set.catlen

    # Pre-allocated buffers, one prediction matrix per head.
    val_preds1 = np.zeros((n_samples, n_cats), dtype=np.float32)
    val_preds2 = np.zeros((n_samples, n_cats), dtype=np.float32)
    val_true = np.zeros((n_samples, n_cats), dtype=np.float32)
    val_attr = np.zeros(n_samples, dtype=np.int)
    val_attr_mask = np.zeros((n_samples, n_cats), dtype=np.int)
    val_labels_str = np.empty((n_samples, n_cats), dtype='|S1')

    offset = 0
    for batch_idx, batch in enumerate(data_loader):
        if not whileTraing:
            print('%d/%d' % (batch_idx, n_samples / data_loader.batch_size))
        inputs, attr, attr_mask, labels, labels_str = batch

        # volatile: inference only, no autograd graph (pre-0.4 torch API)
        inputs = Variable(inputs.cuda(), volatile=True)
        labels = Variable(labels.cuda(), volatile=True)
        attr_mask = Variable(attr_mask.cuda(), volatile=True)

        # forward: the model returns one output per head
        outputs1, outputs2 = model(inputs)

        bs = labels.size(0)
        sl = slice(offset, offset + bs)
        val_preds1[sl] = outputs1.data.cpu().numpy()
        val_preds2[sl] = outputs2.data.cpu().numpy()
        val_true[sl] = labels.data.cpu().numpy()
        val_attr[sl] = attr
        val_attr_mask[sl] = attr_mask.data.cpu().numpy()
        val_labels_str[sl] = labels_str
        offset += bs

    val_preds_avg = (val_preds1 + val_preds2) * 0.5
    labels_yonly = val_true.argmax(1)

    def _score(preds):
        # top-1 accuracy restricted to each sample's valid categories + mAP
        pred_y = (preds * val_attr_mask).argmax(1)
        acc = (pred_y == labels_yonly).sum() * 1. / labels_yonly.shape[0]
        mAP, APs, accs = cal_mAP(val_labels_str, preds, val_attr, data_set.catidx_map)
        return acc, mAP, APs, accs

    val_acc1, val_mAP1, APs1, accs1 = _score(val_preds1)
    val_acc2, val_mAP2, APs2, accs2 = _score(val_preds2)
    val_acc_avg, val_mAP_avg, APs_avg, accs_avg = _score(val_preds_avg)

    since = time.time() - start
    if whileTraing:
        logging.info('val-acc(1): %.4f ||val-mAP(1): %.4f ||time: %d'
                     % (val_acc1, val_mAP1, since))
        for key in APs1.keys():
            logging.info('acc: %.4f, AP: %.4f %s' % (accs1[key], APs1[key], key))

        logging.info('--' * 30)
        logging.info('val-acc(2): %.4f ||val-mAP(2): %.4f ||time: %d'
                     % (val_acc2, val_mAP2, since))
        for key in APs2.keys():
            logging.info('acc: %.4f, AP: %.4f %s' % (accs2[key], APs2[key], key))

        logging.info('--' * 30)
        logging.info('val-acc(avg): %.4f ||val-mAP(avg): %.4f ||time: %d'
                     % (val_acc_avg, val_mAP_avg, since))
        for key in APs_avg.keys():
            logging.info('acc: %.4f, AP: %.4f %s' % (accs_avg[key], APs_avg[key], key))

        return val_mAP1, val_acc1
    else:
        print('val-acc(avg): %.4f ||val-mAP(avg): %.4f ||time: %d'
              % (val_acc_avg, val_mAP_avg, since))
        for key in APs_avg.keys():
            print('acc: %.4f, AP: %.4f %s' % (accs_avg[key], APs_avg[key], key))
        return val_preds_avg, val_labels_str, val_attr, val_true



def predict_addlm(
        model,
        model_lm,
        data_set,
        data_loader,
        whileTraing=False
):
    '''
    Evaluate `model` on `data_loader`, conditioning it on landmark
    features produced by `model_lm`.

    :param model: classification net, called as model(inputs, lm_fea)
    :param model_lm: landmark net; its sigmoid heatmaps are reduced to
                     per-keypoint peak coordinates via extract_lm_fea
    :param data_set: dataset exposing .catlen and .catidx_map
    :param data_loader: yields (inputs, attr, attr_mask, labels, labels_str)
    :param whileTraing: when True, log metrics and return (val_mAP, val_acc);
                        when False, print metrics and return
                        (val_preds, val_labels_str, val_attr, val_true)
    '''
    t0 = time.time()
    model.eval()
    # BUGFIX: the landmark model was never switched to eval mode, so any
    # dropout/batch-norm in it kept running with training behavior while
    # the classifier was evaluated.
    model_lm.eval()

    val_preds = np.zeros((len(data_set), data_set.catlen), dtype=np.float32)
    val_true = np.zeros((len(data_set), data_set.catlen), dtype=np.float32)
    val_attr = np.zeros(len(data_set), dtype=np.int)
    val_attr_mask = np.zeros((len(data_set), data_set.catlen), dtype=np.int)
    val_labels_str = np.empty((len(data_set), data_set.catlen), dtype='|S1')

    idx = 0
    for batch_cnt_val, data_val in enumerate(data_loader):
        if not whileTraing:
            print('%d/%d' % (batch_cnt_val, len(data_set) / data_loader.batch_size))
        inputs, attr, attr_mask, labels, labels_str = data_val

        # BUGFIX/consistency: mark inference Variables volatile like
        # predict/predict2 do, so no autograd graph is retained
        # (pre-0.4 torch API).
        inputs = Variable(inputs.cuda(), volatile=True)
        labels = Variable(labels.cuda(), volatile=True)
        attr_mask = Variable(attr_mask.cuda(), volatile=True)

        # landmark branch: heatmaps -> normalized peak coordinates
        lm_out = F.sigmoid(model_lm(inputs))  # (N, 24, 336, 336)
        lm_fea = extract_lm_fea(lm_out)       # (N, 48) cuda FloatTensor
        lm_fea = Variable(lm_fea, volatile=True)

        # forward
        outputs = model(inputs, lm_fea)

        # accumulate this batch's results into the pre-allocated buffers
        val_preds[idx:(idx + labels.size(0))] = outputs.data.cpu().numpy()
        val_true[idx:(idx + labels.size(0))] = labels.data.cpu().numpy()
        val_attr[idx:(idx + labels.size(0))] = attr
        val_attr_mask[idx:(idx + labels.size(0))] = attr_mask.data.cpu().numpy()
        val_labels_str[idx:(idx + labels.size(0))] = labels_str

        idx += labels.size(0)

    # top-1 restricted to the categories valid for each sample's attribute
    preds_y = np.argmax(val_preds * val_attr_mask, 1)
    labels_yonly = np.argmax(val_true, 1)

    val_acc = (preds_y == labels_yonly).sum() * 1. / labels_yonly.shape[0]
    val_mAP, APs, accs = cal_mAP(val_labels_str, val_preds, val_attr, data_set.catidx_map)

    since = time.time() - t0
    if whileTraing:
        logging.info('val-acc@1: %.4f ||val-mAP: %.4f ||time: %d'
                     % (val_acc, val_mAP, since))
        for key in APs.keys():
            logging.info('acc: %.4f, AP: %.4f %s' % (accs[key], APs[key], key))
        return val_mAP, val_acc
    else:
        print('val-acc@1: %.4f ||val-mAP: %.4f ||time: %d'
              % (val_acc, val_mAP, since))
        for key in APs.keys():
            print('acc: %.4f, AP: %.4f %s' % (accs[key], APs[key], key))
        return val_preds, val_labels_str, val_attr, val_true



def batch_postprocess(hms):
    '''
    Locate the peak of every key-point heatmap and normalize to [0, 1).

    :param hms: (bs, C, H, W) float np.array of heatmap activations
                (C=24 key points in this project)
    :return: (bs, C, 2) float np.array; [..., 0] is peak x / W,
             [..., 1] is peak y / H
    '''
    bs, C, H, W = hms.shape

    # BUGFIX: the old code did hms[i].transpose((2, 0, 1)).reshape(C, -1),
    # which is only correct for channel-LAST (H, W, C) heatmaps. The
    # documented contract and the caller (lm_out is (N, 24, 336, 336))
    # are channel-FIRST, so the per-channel argmaxes were taken over
    # scrambled data (silently, because H == W here).
    flat_idx = hms.reshape(bs, C, H * W).argmax(2)        # (bs, C)
    idx_y, idx_x = np.unravel_index(flat_idx, (H, W))     # each (bs, C)

    batch_peaks = np.zeros((bs, C, 2), dtype=np.float64)
    batch_peaks[:, :, 0] = idx_x * 1.0 / W
    batch_peaks[:, :, 1] = idx_y * 1.0 / H
    return batch_peaks




def extract_lm_fea(lm_out):
    '''
    Convert a batch of landmark heatmaps into a flat coordinate feature.

    :param lm_out: Variable/Tensor of shape (N, C, H, W) heatmaps
                   (here C=24, H=W=336)
    :return: cuda FloatTensor of shape (N, 2*C) = (N, 48) holding the
             normalized (x, y) peak position of every heatmap channel
    '''
    # (N, C, 2) normalized peaks flattened to (N, 48); 48 == 2 * 24.
    # FIX: the old trailing comment claimed the result was pairwise
    # distances of shape (N, C*(C-1)//2) -- that variant was dead,
    # commented-out code and has been removed.
    batch_peaks = batch_postprocess(lm_out.data.cpu().numpy()).reshape(-1, 48)
    return torch.from_numpy(batch_peaks).float().cuda()



def gen_submission(test_pd, preds, catidx_map):
    """Build the submission dataframe from raw prediction rows.

    For each attribute, slice that attribute's category span out of each
    row's probability vector and render it as a ';'-joined string of
    '%.5f' values in the 'AttrValueProbs' column.
    """
    sub = test_pd[['ImageName', 'AttrKey']].copy()
    sub['AttrValueProbs'] = list(preds)

    for attr_idx in catidx_map.keys():
        attr_name = idx2attr_map[attr_idx]
        span = catidx_map[attr_idx]
        lo, hi = span[0], span[1]

        mask = sub['AttrKey'] == attr_name
        sub.loc[mask, 'AttrValueProbs'] = sub.loc[mask, 'AttrValueProbs'].apply(
            lambda probs: ';'.join(['%.5f' % p for p in list(probs[lo:hi])]))
    return sub

