import os
import time
import pandas as pd
from sklearn.model_selection import train_test_split
from LMdata.LMdataset import LMdata, collate_fn
import torch
import torch.utils.data as torchdata
from utils.train import train, trainlog
from LMdata.LMaug import *
from models.DRN import *
from utils.preprocessing import *
from utils.predicting import *
from models.GCN import GCN

def pred_val(root_path, model, input_size, save_dir, model_name):
    """Run the model over the validation split, report the NE score, and dump results.

    Args:
        root_path: dataset root directory consumed by ``get_train_val``.
        model: trained keypoint network, already on GPU and in eval mode.
        input_size: (h, w) tuple images are resized to before inference.
        save_dir: directory the csv/npy outputs are written into (must exist).
        model_name: basename (without extension) of the prediction csv.

    Side effects: writes ``<model_name>.csv``, ``val_pd.csv`` and
    ``val_cates.npy`` into ``save_dir`` and prints timing/score info.
    """

    class valAug(object):
        """Deterministic validation-time transform: resize + ImageNet normalization."""
        def __init__(self):
            self.augment = Compose([
                ResizeImg(size=input_size),
                Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            ])

        def __call__(self, *args):
            return self.augment(*args)

    # prepare train/val split and drop unusable validation rows
    train_pd, val_pd = get_train_val(root_path)
    val_pd = drop_shitdata(val_pd.copy())

    # re-index so row positions line up with prediction order
    train_pd.index = range(train_pd.shape[0])
    val_pd.index = range(val_pd.shape[0])

    # FIX: these were Python 2 `print` statements (syntax errors on Python 3,
    # and inconsistent with the print() calls used everywhere else in this file)
    print(val_pd.shape)
    print(val_pd.image_category.value_counts())

    data_set = LMdata(val_pd, valAug())
    data_loader = torchdata.DataLoader(data_set, 8, num_workers=4, shuffle=False,
                                       pin_memory=True, collate_fn=collate_fn)

    # predict through dataset
    t0 = time.time()
    preds_xy, _, cates = predict(model, data_set, data_loader, do_blur=False, counting=True)
    t1 = time.time()

    # do evaluation against ground-truth keypoints
    true_xy = get_label_from_df(val_pd)
    score, details = cal_NEscore(preds_xy, true_xy, cates)

    # show results (per-category NE plus support counts)
    print('time-cost: %d s' % (t1 - t0))
    print('val-NE: %.4f%%' % (score * 100))
    for key in details.keys():
        print('NE: %.3f%% supports: %d  %s' %
              (details[key][0] * 100, details[key][1], key))

    # generate submission file: one "x_y_visibility" string per keypoint column
    sub = pd.DataFrame()
    sub['image_id'] = data_set.paths
    sub['image_category'] = data_set.cates

    for i in range(data_set.num_kps):
        kp_str = idx2kp[i]
        # FIX: dropped the redundant `.str[:]` no-op slice
        sub[kp_str] = (preds_xy[:, i, 0].astype(str) +
                       '_' + preds_xy[:, i, 1].astype(str) +
                       '_' + true_xy[:, i, 2].astype(str))

        # FIX: use .loc instead of chained indexing, which raises
        # SettingWithCopyWarning and may silently fail to write through
        sub.loc[true_xy[:, i, 2] == -1, kp_str] = '-1_-1_-1'

    sub.to_csv(os.path.join(save_dir, model_name + '.csv'), index=False)
    val_pd.to_csv(os.path.join(save_dir, 'val_pd.csv'), index=False)
    np.save(os.path.join(save_dir, 'val_cates.npy'), cates)


if __name__ == '__main__':

    # restrict inference to GPU 0
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'

    # run configuration (hard-coded paths for this experiment)
    data_root = "/media/hszc/data1/LandMarks"
    checkpoint = '/home/hszc/zhangchi/models/Landmarks/round2/GCN368_bs8_base/weights-13-3529-[0.0443].pth'
    run_name = 'GCN336_bs8_4.43'
    out_dir = './val2'
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    # build the network and restore its weights; on a key mismatch, retry
    # after wrapping in DataParallel — presumably the checkpoint was saved
    # from a DataParallel model ('module.'-prefixed keys); verify with the
    # training script that produced it
    net = GCN(num_classes=24, layers=50)
    net.eval()
    try:
        net.load_state_dict(torch.load(checkpoint))
    except KeyError:
        net = torch.nn.DataParallel(net)
        net.load_state_dict(torch.load(checkpoint))
    net.cuda()

    pred_val(data_root, net, input_size=(368, 368), save_dir=out_dir, model_name=run_name)

