import os
import time
import pandas as pd
from sklearn.model_selection import train_test_split
from LMdata.LMdataset import LMdata, collate_fn
import torch
import torch.utils.data as torchdata
from utils.train import train, trainlog
from LMdata.LMaug import *
from models.DRN import *
from utils.preprocessing import *
from utils.predicting import *
from models.GCN import GCN
from utils.plotting import DrawKeyPoints

def pred_val(root_path, model, input_sizes, save_dir, model_name):
    """Run multi-scale validation inference for keypoint detection.

    For each validation image, the model is run at every size in
    ``input_sizes``; per-scale heatmap logits are averaged and the peak of
    each keypoint channel gives the predicted (x, y). Normalized-error (NE)
    metrics are printed and a submission CSV is written to ``save_dir``.

    Args:
        root_path: dataset root passed to ``get_train_val`` (project helper).
        model: a CUDA-capable heatmap model; assumed already in eval mode.
        input_sizes: iterable of (H, W) inference resolutions.
        save_dir: directory for the submission CSV / val metadata files.
        model_name: basename (without extension) of the submission CSV.

    Returns:
        pandas.DataFrame: the submission table that was written to disk.
    """

    class valAug(object):
        """Deterministic eval-time transform: resize + ImageNet normalize."""

        def __init__(self, size):
            self.augment = Compose([
                ResizeImg(size=size),
                Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            ])

        def __call__(self, *args):
            return self.augment(*args)

    # Prepare the validation split; drop_shitdata filters bad annotations
    # (project helper — exact criteria defined in utils.preprocessing).
    train_pd, val_pd = get_train_val(root_path)
    val_pd = drop_shitdata(val_pd.copy())

    train_pd.index = range(train_pd.shape[0])
    val_pd.index = range(val_pd.shape[0])
    # FIX: these were Python 2 `print` statements — SyntaxError on Python 3.
    print(val_pd.shape)
    print(val_pd.image_category.value_counts())

    # One dataset/loader per inference scale; shuffle=False + batch_size=1 so
    # the loaders stay sample-aligned across scales.
    data_set = {}
    data_loader = {}
    for i, size in enumerate(input_sizes):
        data_set[i] = LMdata(val_pd, valAug(size=size))
        data_loader[i] = enumerate(torchdata.DataLoader(data_set[i], 1, num_workers=2,
                                    shuffle=False, pin_memory=True, collate_fn=collate_fn))

    num_smps = val_pd.shape[0]
    num_kps = data_set[0].num_kps
    # cate2idx is a project-level category->index mapping (from a * import).
    cates = np.array(val_pd.image_category.apply(lambda x: cate2idx[x]).tolist())
    # FIX: np.float was removed in NumPy 1.24 — use the builtin.
    preds_xy = np.empty((num_smps, num_kps, 2), dtype=float)  # (N,24,2) x y

    idx = 0
    t0 = time.time()
    while idx < num_smps:
        outputs = []
        print('%d / %d' % (idx, num_smps))
        img_h = img_w = None  # set per-sample below from the original size
        for i, size in enumerate(input_sizes):
            _, single_data = next(data_loader[i])
            imgs, _, vis_masks, ori_sizes, _, _ = single_data
            vis_masks = vis_masks[0].numpy()

            # Inference only: no_grad avoids building the autograd graph
            # (replaces the deprecated Variable wrapper).
            with torch.no_grad():
                imgs = imgs.cuda()
                # FIX: F.sigmoid is deprecated — use torch.sigmoid.
                output = torch.sigmoid(model(imgs)).data.cpu().numpy()[0]  # (24,H,W)
            output = np.transpose(output, (1, 2, 0))  # (H,W,24)

            # Resize heatmaps back to the original image size so peaks map
            # directly to original-image pixel coordinates.
            img_h, img_w = ori_sizes[0]
            output = cv2.resize(output, (img_w, img_h), interpolation=cv2.INTER_CUBIC)

            # Clamp away from 0/1 so the inverse-sigmoid below stays finite.
            output[np.where(output >= 0.99)] = 0.999
            output[np.where(output <= 0.01)] = 0.001

            # Invert the sigmoid to get logits back (averaging in logit space).
            output = -np.log(1 / output - 1)
            outputs.append(output)

        # Average logits across input scales, then keypoint-channel first.
        predi = np.mean(outputs, axis=0)
        predi = np.transpose(predi, (2, 0, 1))  # (24, img_h, img_w)

        # Peak of each channel = predicted keypoint location.
        peaks_idx = predi.reshape(num_kps, -1).argmax(1)  # (C,)
        # FIX: the `dims` keyword was removed from np.unravel_index — use shape.
        idx_y, idx_x = np.unravel_index(peaks_idx, shape=(img_h, img_w))  # (C,), (C,)

        preds_xy[idx, :, 0] = idx_x
        preds_xy[idx, :, 1] = idx_y

        idx += 1

    t1 = time.time()

    # Evaluate normalized error against ground-truth coordinates.
    true_xy = get_label_from_df(val_pd)
    score, details = cal_NEscore(preds_xy, true_xy, cates)

    print('time-cost: %d s' % (t1 - t0))
    print('val-NE: %.4f%%' % (score * 100))
    for key in details.keys():
        print('NE: %.3f%% supports: %d  %s' %
              (details[key][0] * 100, details[key][1], key))

    preds_xy = preds_xy.astype(int)
    t1 = time.time()

    # Build the submission file: each keypoint column is "x_y_1"
    # (the trailing 1 marks the point as visible in the contest format).
    sub = pd.DataFrame()
    sub['image_id'] = data_set[0].paths
    sub['image_category'] = data_set[0].cates

    for i in range(data_set[0].num_kps):
        kp_str = idx2kp[i]
        sub[kp_str] = (preds_xy[:, i, 0].astype(str) +
                       '_' + preds_xy[:, i, 1].astype(str) +
                       '_1')
    sub.to_csv(os.path.join(save_dir, model_name + '.csv'), index=False)
    val_pd.to_csv(os.path.join(save_dir, 'val_pd.csv'), index=False)
    np.save(os.path.join(save_dir, 'val_cates.npy'), cates)
    return sub


if __name__ == '__main__':

    os.environ["CUDA_VISIBLE_DEVICES"] = '0'
    root_path = "/media/hszc/data1/LandMarks"
    resume = '/home/hszc/zhangchi/models/Landmarks/round2/GCN368_bs8_base/weights-13-3529-[0.0443].pth'
    model_name = 'GCN336_bs8_4.43_(256-368-512)'
    save_dir = './val2'
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    # 24 keypoint channels, ResNet-50 backbone (per the GCN constructor args).
    model = GCN(num_classes=24, layers=50)
    model.eval()
    try:
        model.load_state_dict(torch.load(resume))
        print('loaded')
    # FIX: modern load_state_dict raises RuntimeError (not KeyError) on
    # missing/unexpected keys, e.g. when the checkpoint was saved from a
    # DataParallel model and its keys carry a "module." prefix — retry
    # wrapped in DataParallel so the key names line up.
    except (KeyError, RuntimeError):
        model = torch.nn.DataParallel(model)
        model.load_state_dict(torch.load(resume))
        print('loaded')
    model.cuda()

    # Multi-scale inference resolutions; logits are averaged in pred_val.
    input_sizes = [(256, 256), (368, 368), (512, 512)]
    pred_val(root_path, model, input_sizes, save_dir, model_name)