"""Script for evaluating ap."""
import json
import logging

import cv2
import torch.multiprocessing

torch.multiprocessing.set_sharing_strategy('file_system')
import os
import sys

sys.path.append("/media/liyuke/share/AAA/part3/pose_estimation_master/")
import numpy as np
import torch
import torch.nn as nn
import argparse
from utils.config import update_config
import torch.utils.data
from tensorboardX import SummaryWriter
from tqdm import tqdm
from datasets.air2land import Air2land
from opt import cfg, logger, opt
from utils.logger import board_writing, debug_writing
from utils.metrics import DataLogger, calc_accuracy, calc_integral_accuracy, evaluate_mAP, calculate_ap
from utils.transforms import get_func_heatmap_to_coord
from models.fasterpose import FastPose
from utils.metrics import *


def px_acc(errs_corner2D, px_threshold=5):
    """Log and return the percentage of samples whose mean 2D corner error
    is within ``px_threshold`` pixels.

    Args:
        errs_corner2D: iterable of per-sample mean corner errors (pixels).
        px_threshold: accuracy threshold in pixels (default 5, matching the
            original hard-coded value).

    Returns:
        Accuracy as a percentage in [0, 100]; 0.0 for empty input.
        (The original computed this value but always returned None, and the
        ``eps`` added to the denominator slightly deflated the result.)
    """
    errs = np.asarray(errs_corner2D, dtype=float)
    if errs.size == 0:
        # Explicit empty-input handling replaces the eps division hack.
        corner_acc = 0.0
    else:
        corner_acc = np.count_nonzero(errs <= px_threshold) * 100. / errs.size
    logging.info('corner_acc {}px: {:1.4f}'.format(px_threshold, corner_acc))
    return corner_acc

def ADD_auc(PM_pre, kp3d_h_gt, PM_gt):
    """Compute and log the ADD hit ratio at a fixed 1.4 threshold.

    ``ADD_accuracy`` (from utils.metrics) returns (hits, total, ticks);
    only the hits/total ratio is reported here.

    Args:
        PM_pre: predicted pose matrices.
        kp3d_h_gt: homogeneous 3D model keypoints.
        PM_gt: ground-truth pose matrices.
    """
    hits, total, _ticks = ADD_accuracy(PM_pre, kp3d_h_gt, PM_gt, 1.4, P_is_matrix=True)
    ratio = hits / total
    logging.info("************for ADD***********")
    logger.info('Computing ADD(0.5d) for AUC,  y:{:.4f}'.format(ratio))
    return


def Pose_estimation(PM_pr_all, PM_gt_all):
    """Evaluate predicted poses against ground-truth pose matrices.

    Args:
        PM_pr_all: sequence of predicted pose matrices.
        PM_gt_all: matching sequence of ground-truth pose matrices; rotation
            is read from ``[:3, :3]`` and translation from ``[:3, 3]``.

    Logs the pose AUC at the 5/10/20 thresholds plus per-threshold accuracy
    and mAP.  (Bug fix: the acc/mAP values were previously computed and then
    silently discarded, and the auc log line had a stray '/'.)
    """
    tot_e_t, tot_e_R, tot_e_pose = [], [], []
    thresholds = [5, 10, 20]
    logging.info("************for pose estimation***********")
    # Per-sample translation / rotation errors; pose error is the max of both.
    for pose_est, pose_gt in zip(tqdm(PM_pr_all), PM_gt_all):
        dR = pose_gt[:3, :3]
        dt = pose_gt[:3, 3]
        e_t, e_R = compute_pose_error(pose_est, dR, dt)
        tot_e_t.append(e_t)
        tot_e_R.append(e_R)
        tot_e_pose.append(max(e_t, e_R))
    tot_e_pose = np.array(tot_e_pose)
    auc = pose_auc(tot_e_pose, thresholds)
    acc_5 = (tot_e_pose < 5).mean()
    acc_10 = (tot_e_pose < 10).mean()
    acc_15 = (tot_e_pose < 15).mean()
    acc_20 = (tot_e_pose < 20).mean()
    map_5 = acc_5
    map_10 = np.mean([acc_5, acc_10])
    map_20 = np.mean([acc_5, acc_10, acc_15, acc_20])
    logging.info(f"pose auc: {auc}")
    logging.info(f"acc@5/10/15/20: {acc_5:.4f}/{acc_10:.4f}/{acc_15:.4f}/{acc_20:.4f}")
    logging.info(f"mAP@5/10/20: {map_5:.4f}/{map_10:.4f}/{map_20:.4f}")

    return

def validate_pose(m, opt, heatmap_to_coord, batch_size=20):
    """Run keypoint inference on the Air2land test split and log pose metrics.

    Pipeline: predict heatmaps -> map predictions back to original-image 2D
    keypoints -> PnP against the fixed 3D keypoint model -> log 2D pixel
    accuracy (px_acc), ADD accuracy (ADD_auc) and R/t errors (Pose_estimation).

    Args:
        m: trained keypoint network; switched to eval mode here.
        opt: parsed command-line options (not used inside this function).
        heatmap_to_coord: maps a heatmap + bbox back to image coordinates
            (from get_func_heatmap_to_coord).
        batch_size: test dataloader batch size.
    """
    # To save
    errs_corner2D = []  # per-sample mean 2D corner error in pixels

    K = get_K()  # camera intrinsics (from utils.metrics)
    gt_val_dataset = Air2land(cfg, mode="test")
    # NOTE(review): assumes the model predicts exactly 6 keypoints — confirm
    # against cfg / the Air2land dataset definition.
    eval_joints = list(range(6))

    gt_val_loader = torch.utils.data.DataLoader(
        gt_val_dataset, batch_size=batch_size, shuffle=False, num_workers=6, drop_last=False)
    m.eval()

    norm_type = cfg.LOSS.get('NORM_TYPE', None)
    hm_size = cfg.DATA_PRESET.HEATMAP_SIZE
    kp2d_pre_all = None  # accumulated predicted 2D keypoints over all batches
    PM_gt_all = None  # accumulated ground-truth pose matrices
    # One shared 3D keypoint model for every sample: (1, n_kpts, 3), and its
    # homogeneous (1, n_kpts, 4) form for the ADD metric.
    kp3d_gt = np.loadtxt(cfg["DATASET"]["VAL"]["ROOT"] + "3D_param.txt")
    kp3d_gt = torch.tensor(kp3d_gt).unsqueeze(0).float()
    kp3d_h_gt = torch.concatenate((kp3d_gt, torch.ones((kp3d_gt.size(0), kp3d_gt.size(1), 1))), dim=2).float()
    for img_ors, inps, labels, label_masks, img_ids, bboxes, kp2d_gt, PM, _ in tqdm(gt_val_loader, dynamic_ncols=True):
        # pts2d = batch_project(PM, kpts_3d[0, ...], K, angle_axis=False)
        # for visualize
        # img_or = np.array(img_ors[0])
        # for point in pts2d[0,...]:
        #     cv2.circle(img_or,(int(point[0]),int(point[1])),5,(0,255,255))
        # cv2.imshow("out",img_or)
        # cv2.waitKey()
        kp2d_pre = torch.zeros((inps.size(0), kp3d_gt.size(1), 2))
        if isinstance(inps, list):
            inps = [inp.cuda() for inp in inps]
        else:
            inps = inps.cuda()
        pred = m(inps)
        # Map heatmap-space predictions back to original-image coordinates.
        for i in range(pred.shape[0]):
            bbox = bboxes[i].tolist()
            pose_coords, pose_scores = heatmap_to_coord(
                pred[i][eval_joints], bbox, hm_shape=hm_size, norm_type=norm_type)

            kp2d_pre[i, ...] = torch.tensor(pose_coords)

        # Compute corner prediction error
        corner_norm = np.linalg.norm(kp2d_gt - kp2d_pre, axis=2)
        corner_dist = np.mean(corner_norm,axis=1)
        errs_corner2D.extend(corner_dist)
        # for visualize
        # img_or = np.array(img_ors[i])
        # for point in kpts_2d[i,...]:
        #     cv2.circle(img_or,(int(point[0]),int(point[1])),5,(0,255,255))
        # cv2.imshow("pred output",img_or)
        # cv2.waitKey()
        if kp2d_pre_all is None:
            kp2d_pre_all = kp2d_pre
        else:
            kp2d_pre_all = torch.concatenate((kp2d_pre_all, kp2d_pre), dim=0)
        if PM_gt_all is None:
            PM_gt_all = PM
        else:
            PM_gt_all = torch.concatenate((PM_gt_all, PM), dim=0)
    # Compute corner 2d prediction error
    px_acc(errs_corner2D)

    # Compute R t prediction error
    PM_pr_all = pnp(kp2d_pre_all, kp3d_gt, K)
    ADD_auc(PM_pr_all, kp3d_h_gt, PM_gt_all)

    # for pose estimation
    Pose_estimation(PM_pr_all,PM_gt_all)

    return


def main(opt, cfg):
    """Build the model from *cfg*, load the checkpoint from ``opt.weight``
    and run the pose evaluation.

    Args:
        opt: parsed command-line options (must provide ``weight``).
        cfg: experiment config loaded via update_config.
    """
    logger.info('******************************')
    logger.info(opt)
    logger.info('******************************')
    logger.info(cfg)
    logger.info('******************************')

    # Model Initialize
    m = FastPose(cfg["MODEL"])
    # map_location='cpu' keeps loading robust when the checkpoint was saved on
    # a different device; m.cuda() moves the weights afterwards anyway.
    m.load_state_dict(torch.load(opt.weight, map_location='cpu'))
    m.cuda()

    heatmap_to_coord = get_func_heatmap_to_coord(cfg)
    # validate_pose logs its metrics and returns None — the previous
    # `n = validate_pose(...)` assignment was dead code.
    validate_pose(m, opt, heatmap_to_coord)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="evaluation script.")

    parser.add_argument('--cfg', help='experiment configure file name', required=True, type=str)
    parser.add_argument('--weight', type=str, default='/homeLocal/guipotje/sshfs/datasets/coco_20k',
                        help='Path to the pretrained weight.')
    # Fallback arguments keep the script runnable with a bare `python file.py`,
    # but real command-line arguments now take precedence (previously they
    # were silently ignored because a hard-coded list was always parsed).
    default_args = ["--cfg",
                    "/media/liyuke/share/AAA/part3/pose_estimation_master/config/air2land/air2land_256x192_res50_lr1e-3_2x-dcn.yaml",
                    "--weight",
                    "/media/liyuke/share/AAA/part3/pose_estimation_master/exp/mse-air2land_256x192_res50_lr1e-3_2x-dcn.yaml/model_111.pth"]
    opt = parser.parse_args(sys.argv[1:] if len(sys.argv) > 1 else default_args)

    cfg_file_name = os.path.basename(opt.cfg)
    cfg = update_config(opt.cfg)

    main(opt, cfg)
