import os
import time
import pandas as pd
from sklearn.model_selection import train_test_split
from LMdata.LMdataset import LMdata, collate_fn
import torch
import torch.utils.data as torchdata
from utils.train import train, trainlog
from LMdata.LMaug import *
from models.DRN import *
from utils.preprocessing import *
from utils.predicting import *
from LMdata.LMdataset import idx2kp
from copy import deepcopy
from models.GCN import GCN

def pred_online(root_path, model, input_size):
    """Run keypoint inference over the round2 test set and build a submission frame.

    Args:
        root_path: dataset root directory containing ``round2/test_a/``.
        model: trained keypoint model, already moved to the target device.
        input_size: target (H, W) the test images are resized to before inference.

    Returns:
        pandas.DataFrame with columns 'image_id', 'image_category', and one
        column per keypoint holding the submission string "x_y_1".
    """

    class TestAug(object):
        """Deterministic test-time transform: resize + ImageNet normalization."""

        def __init__(self):
            self.augment = Compose([
                ResizeImg(size=input_size),
                # ImageNet statistics -- presumably matches training-time
                # normalization; confirm against the training pipeline.
                Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            ])

        def __call__(self, *args):
            return self.augment(*args)

    annotation = pd.read_csv(os.path.join(root_path, './round2/test_a/test.csv'))
    # Keep the original ids: join_path_to_df appears to rewrite 'image_id'
    # to absolute paths, and the scorer expects the original relative ids.
    ori_ids = deepcopy(annotation['image_id'].tolist())

    annotation = join_path_to_df(annotation, root_path, 'round2/test_a')
    test_pd = add_fake_kp(annotation, idx2kp)

    # FIX: Python-2 print statements -> print() calls (consistent with the
    # print() call below; required for Python 3).
    print(test_pd.shape)
    print(test_pd.image_category.value_counts())

    data_set = LMdata(test_pd, TestAug())
    data_loader = torchdata.DataLoader(data_set, 8, num_workers=4, shuffle=False,
                                       pin_memory=True, collate_fn=collate_fn)

    # predict through dataset
    t0 = time.time()
    preds_xy, _, cates = predict(model, data_set, data_loader, do_blur=False, counting=True)
    t1 = time.time()

    print('time-cost: %d s' % (t1 - t0))

    # Generate submission file. Each keypoint column holds "x_y_1"
    # (trailing 1 is the visibility flag).
    sub = pd.DataFrame()
    sub['image_id'] = data_set.paths
    sub['image_category'] = data_set.cates

    for i in range(data_set.num_kps):
        kp_str = idx2kp[i]
        # Single concatenation; the original's intermediate `.str[:]` was a no-op slice.
        sub[kp_str] = preds_xy[:, i, 0].astype(str) + '_' + \
                      preds_xy[:, i, 1].astype(str) + '_1'

    # Restore the original relative image ids for the submission.
    sub['image_id'] = ori_ids

    return sub


if __name__ == '__main__':

    # Pin inference to a single GPU.
    os.environ["CUDA_VISIBLE_DEVICES"] = '1'
    root_path = "/media/hszc/data1/LandMarks"
    resume = '/home/hszc/zhangchi/models/Landmarks/round2/GCN368_bs8_base/weights-13-3529-[0.0443].pth'

    model = GCN(num_classes=24, layers=50)
    try:
        model.load_state_dict(torch.load(resume))
    # FIX: recent PyTorch raises RuntimeError (not KeyError) on state-dict
    # key mismatch, so the DataParallel fallback below would never fire with
    # `except KeyError` alone. Catch both to stay compatible with old versions.
    except (KeyError, RuntimeError):
        # Checkpoint was saved from a DataParallel wrapper ('module.' key
        # prefix): wrap the model so the keys line up, then retry.
        model = torch.nn.DataParallel(model)
        model.load_state_dict(torch.load(resume))
    # Switch to inference mode after the weights are in place.
    model.eval()
    model.cuda()

    sub = pred_online(root_path, model, input_size=(368, 368))

    if not os.path.exists('./pred_online2/csvs'):
        os.makedirs('./pred_online2/csvs')
    sub.to_csv('./pred_online2/csvs/GCN368_bs8_base-4.43.csv', index=False)