import os
import time
import pandas as pd
from sklearn.model_selection import train_test_split
from LMdata.LMdataset import LMdata, collate_fn
import torch
import torch.utils.data as torchdata
from utils.train import train, trainlog
from LMdata.LMaug import *
from models.DRN import *
from utils.preprocessing import *
from utils.predicting import *
from models.GCN import GCN
from utils.plotting import DrawKeyPoints
from copy import deepcopy

def pred_online(img_root, model, input_sizes):
    """Run multi-scale inference on the round2 test set and build a submission DataFrame.

    Args:
        img_root: dataset root directory containing ``round2/test_a/test.csv``.
        model: heatmap network already on GPU; ``model(imgs)`` is assumed to
            return per-keypoint logits of shape (1, 24, H, W) — confirm against
            the GCN definition.
        input_sizes: list of (h, w) resolutions; per-scale logits are averaged
            before the peak is extracted.

    Returns:
        pandas.DataFrame with columns ``image_id``, ``image_category`` and one
        ``"x_y_1"`` string column per keypoint.
    """

    class valAug(object):
        """Validation-time transform: resize then ImageNet-stats normalization."""
        def __init__(self, size):
            self.augment = Compose([
                ResizeImg(size=size),
                Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            ])

        def __call__(self, *args):
            return self.augment(*args)

    # BUG FIX: the original read the module-level global `root_path` here and
    # below, ignoring the `img_root` parameter, so the function only worked
    # when run from this script's __main__.
    annotation = pd.read_csv(os.path.join(img_root, './round2/test_a/test.csv'))
    ori_ids = deepcopy(annotation['image_id'].tolist())

    annotation = join_path_to_df(annotation, img_root, 'round2/test_a')
    test_pd = add_fake_kp(annotation, idx2kp)

    # BUG FIX: was a Python 2 `print` statement (SyntaxError under Python 3,
    # which the rest of this file targets).
    print(test_pd.shape)

    # One dataset/loader per input scale. shuffle=False keeps every loader
    # aligned sample-for-sample, so next() below advances them in lockstep.
    data_set = {}
    data_loader = {}
    for i, size in enumerate(input_sizes):
        data_set[i] = LMdata(test_pd, valAug(size=size))
        data_loader[i] = enumerate(torchdata.DataLoader(data_set[i], 1, num_workers=2,
                                    shuffle=False, pin_memory=True, collate_fn=collate_fn))

    num_smps = test_pd.shape[0]
    num_kps = data_set[0].num_kps
    cates = np.array(test_pd.image_category.apply(lambda x: cate2idx[x]).tolist())
    # `np.float` was removed in NumPy 1.24 — the builtin is the documented spelling.
    preds_xy = np.empty((num_smps, num_kps, 2), dtype=float)  # (N,24,2) x y

    idx = 0
    t0 = time.time()
    while idx < num_smps:
        outputs = []
        print('%d / %d' % (idx, num_smps))
        for i, size in enumerate(input_sizes):
            _, single_data = next(data_loader[i])
            imgs, _, vis_masks, ori_sizes, _, _ = single_data
            imgs = Variable(imgs).cuda()
            vis_masks = vis_masks[0].numpy()

            # F.sigmoid is deprecated; torch.sigmoid is the supported equivalent.
            output = torch.sigmoid(model(imgs)).data.cpu().numpy()[0]  # (24,H,W)
            output = np.transpose(output, (1, 2, 0))  # (H,W,24)

            # resize each scale's heatmaps back to the original image size so
            # they can be averaged.
            img_h, img_w = ori_sizes[0]
            output = cv2.resize(output, (img_w, img_h), interpolation=cv2.INTER_CUBIC)

            # clamp probabilities away from 0/1 so the logit below stays finite
            output[np.where(output >= 0.99)] = 0.999
            output[np.where(output <= 0.01)] = 0.001

            # invert the sigmoid to recover logits (averaging is done on logits)
            output = -np.log(1 / output - 1)
            outputs.append(output)

        # average logits across the different input sizes
        predi = np.mean(outputs, axis=0)
        predi = np.transpose(predi, (2, 0, 1))  # kp-channel first  (24, img_h, img_w)

        # find the peak (argmax) location of each keypoint heatmap
        peaks_idx = predi.reshape(num_kps, -1).argmax(1)   # (C,)
        # BUG FIX: `dims=` keyword was deprecated and removed from NumPy;
        # the parameter is named `shape`.
        idx_y, idx_x = np.unravel_index(peaks_idx, shape=(img_h, img_w))  # (C,), (C,)

        preds_xy[idx, :, 0] = idx_x
        preds_xy[idx, :, 1] = idx_y

        idx += 1
    preds_xy = preds_xy.astype(int)
    t1 = time.time()

    # generate submission file: each keypoint column is "x_y_1"
    sub = pd.DataFrame()
    sub['image_id'] = data_set[0].paths
    sub['image_category'] = data_set[0].cates

    for i in range(data_set[0].num_kps):
        kp_str = idx2kp[i]
        sub[kp_str] = preds_xy[:, i, 0].astype(str)
        sub[kp_str] = sub[kp_str].str[:] + \
                      '_' + preds_xy[:, i, 1].astype(str) + \
                      '_1'

    # restore the original (path-free) ids expected by the submission format
    sub['image_id'] = ori_ids

    return sub

if __name__ == '__main__':

    os.environ["CUDA_VISIBLE_DEVICES"] = '1'
    root_path = "/media/hszc/data1/LandMarks"
    resume = '/home/hszc/zhangchi/models/Landmarks/round2/GCN368_bs8_base/weights-13-3529-[0.0443].pth'
    model_name = 'GCN368_bs8_4.43_(256-368-512)'

    model = GCN(num_classes=24, layers=50)
    model.eval()
    try:
        model.load_state_dict(torch.load(resume))
        print('loaded')
    except (KeyError, RuntimeError):
        # BUG FIX: checkpoints saved from a DataParallel-wrapped model carry a
        # 'module.' key prefix; modern torch raises RuntimeError (not KeyError)
        # on the mismatch, so the fallback must catch both.
        model = torch.nn.DataParallel(model)
        model.load_state_dict(torch.load(resume))
        print('loaded')
    model.cuda()

    # the three scales whose logits pred_online averages
    input_sizes = [(256, 256), (368, 368), (512, 512)]
    sub = pred_online(root_path, model, input_sizes)

    # exist_ok avoids the race between an exists() check and makedirs()
    os.makedirs('./pred_online2/csvs', exist_ok=True)
    sub.to_csv('./pred_online2/csvs/%s.csv' % (model_name), index=False)
