# -----------------------------------------------------
# Copyright (c) Shanghai Jiao Tong University. All rights reserved.
# Written by Jiefeng Li (jeff.lee.sjtu@gmail.com)
# -----------------------------------------------------

import json
import os
import sys

import cv2 as cv
import kornia as kn
import numpy as np
import rowan
import torch
import torch.nn.functional as F

from .transforms import get_max_pred_batch, _integral_tensor


class DataLogger(object):
    """Running-average tracker for scalar metrics (e.g. loss, accuracy)."""

    def __init__(self):
        self.clear()

    def clear(self):
        """Reset every statistic back to zero."""
        self.value = 0  # most recently recorded value
        self.sum = 0    # weighted sum of all recorded values
        self.cnt = 0    # total weight recorded so far
        self.avg = 0    # running average, sum / cnt

    def update(self, value, n=1):
        """Record ``value`` with weight ``n`` and refresh the average."""
        self.value = value
        self.sum += value * n
        self.cnt += n
        self._cal_avg()

    def _cal_avg(self):
        # Only called from update(), so cnt is guaranteed to be > 0 here.
        self.avg = self.sum / self.cnt


def calc_iou(pred, target):
    """Calculate mask IoU between ``pred`` and ``target``.

    Inputs may be numpy arrays or torch tensors; values are binarized at
    the 0.5 threshold first.  For a single 2-dim mask one IoU value is
    returned; for batched 3- or 4-dim input the mean per-sample IoU is
    returned.

    Raises
    ------
    ValueError
        For any other rank.  (The original silently fell through and died
        with an UnboundLocalError on ``iou``.)
    """
    if isinstance(pred, torch.Tensor):
        pred = pred.cpu().data.numpy()
    if isinstance(target, torch.Tensor):
        target = target.cpu().data.numpy()

    pred = pred >= 0.5
    target = target >= 0.5

    # On boolean masks the original `(pred == target) * pred * target` and
    # `np.maximum` reduce to plain logical AND / OR.
    intersect = np.logical_and(pred, target)
    union = np.logical_or(pred, target)

    if pred.ndim == 2:
        return np.sum(intersect) / np.sum(union)
    if pred.ndim in (3, 4):
        n_samples = pred.shape[0]
        intersect = intersect.reshape(n_samples, -1)
        union = union.reshape(n_samples, -1)
        return np.mean(np.sum(intersect, axis=1) / np.sum(union, axis=1))

    raise ValueError(
        'calc_iou expects 2-, 3- or 4-dim masks, got ndim={}'.format(pred.ndim))


def mask_cross_entropy(pred, target):
    """Mean binary cross-entropy (with logits) over the mask, as a 1-element tensor."""
    loss = F.binary_cross_entropy_with_logits(pred, target, reduction='mean')
    return loss.unsqueeze(0)


def evaluate_mAP(res_file, ann_type='bbox', ann_file='./data/coco/annotations/person_keypoints_val2017.json',
                 silence=True, halpe=False):
    """Evaluate mAP result for coco dataset.

    Parameters
    ----------
    res_file: str
        Path to result json file.
    ann_type: str
        annotation type, including: `bbox`, `segm`, `keypoints`.
    ann_file: str
        Path to groundtruth file.
    silence: bool
        True: disable running log.
    halpe: bool
        True: use the halpecocotools evaluator instead of pycocotools.

    Returns
    -------
    dict or float
        A per-part AP dict when the evaluator produces part-wise stats
        (Halpe-style), otherwise the overall AP value.
    """

    class NullWriter(object):
        # Swallows everything written to it; used to mute COCOeval's prints.
        def write(self, arg):
            pass

    if halpe:
        from halpecocotools.coco import COCO
        from halpecocotools.cocoeval import COCOeval
    else:
        from pycocotools.coco import COCO
        from pycocotools.cocoeval import COCOeval

    oldstdout = sys.stdout
    if silence:
        sys.stdout = NullWriter()  # disable output

    try:
        cocoGt = COCO(ann_file)
        cocoDt = cocoGt.loadRes(res_file)

        cocoEval = COCOeval(cocoGt, cocoDt, ann_type)
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
    finally:
        # Bug fix: the original never restored sys.stdout, leaving the whole
        # process muted after the first silent evaluation (and on exceptions).
        sys.stdout = oldstdout

    if isinstance(cocoEval.stats[0], dict):
        # Part-wise stats (e.g. Halpe full-body evaluation): report AP per part.
        parts = ['body', 'foot', 'face', 'hand', 'fullbody']
        return {part: cocoEval.stats[i][part][0] for i, part in enumerate(parts)}

    stats_names = ['AP', 'Ap .5', 'AP .75', 'AP (M)', 'AP (L)',
                   'AR', 'AR .5', 'AR .75', 'AR (M)', 'AR (L)']
    info_str = {name: cocoEval.stats[ind] for ind, name in enumerate(stats_names)}
    return info_str['AP']


def calc_accuracy(preds, labels):
    """Calculate heatmap accuracy (PCK-style).

    ``preds`` and ``labels`` are heatmap tensors of shape
    (batch, num_joints, hm_h, hm_w).  Per-joint argmax locations are
    compared, with distances normalized by one tenth of the heatmap size.

    Returns the mean accuracy over joints that have at least one valid
    label, or 0 when no joint does.
    """
    preds = preds.cpu().data.numpy()
    labels = labels.cpu().data.numpy()

    num_joints = preds.shape[1]
    hm_h = preds.shape[2]
    hm_w = preds.shape[3]

    # Reduce the heatmaps to per-joint argmax coordinates.
    preds, _ = get_max_pred_batch(preds)
    labels, _ = get_max_pred_batch(labels)
    # Normalize distances by 1/10 of the heatmap resolution.
    # (The original dead `norm = 1.0` assignment is removed.)
    norm = np.ones((preds.shape[0], 2)) * np.array([hm_w, hm_h]) / 10

    dists = calc_dist(preds, labels, norm)

    sum_acc = 0
    cnt = 0
    for i in range(num_joints):
        acc = dist_acc(dists[i])
        if acc >= 0:  # -1 means this joint had no valid labels
            sum_acc += acc
            cnt += 1

    return sum_acc / cnt if cnt > 0 else 0


def calc_integral_accuracy(preds, labels, label_masks, output_3d=False, norm_type='softmax'):
    """Calculate integral coordinates accuracy.

    Converts integral-regression heatmaps into joint coordinates and
    measures a PCK-style accuracy against `labels`.

    preds: heatmap tensor; for 3D output the joint/depth planes are packed
        along dim 1 (num_joints * hm_depth).
    labels: flattened normalized joint coordinates in [-0.5, 0.5]
        (2 or 3 values per joint) — presumably matching `_integral_tensor`'s
        output convention; TODO confirm against the training targets.
    label_masks: per-coordinate visibility mask with the same layout.
    output_3d: True when the heatmaps also encode depth.
    norm_type: normalization scheme forwarded to `_integral_tensor`.
    """

    def integral_op(hm_1d):
        # Expectation over a 1-D heatmap: weight each bin by its index.
        # NOTE(review): torch.cuda.comm.broadcast requires CUDA tensors;
        # this path cannot run on a CPU-only build — confirm if needed.
        hm_1d = hm_1d * torch.cuda.comm.broadcast(torch.arange(hm_1d.shape[-1]).type(
            torch.cuda.FloatTensor), devices=[hm_1d.device.index])[0]
        return hm_1d

    preds = preds.detach()
    hm_width = preds.shape[-1]
    hm_height = preds.shape[-2]

    if output_3d:
        # Depth resolution equals the heatmap height; dim 1 packs
        # num_joints * hm_depth planes.
        hm_depth = hm_height
        num_joints = preds.shape[1] // hm_depth
    else:
        hm_depth = 1
        num_joints = preds.shape[1]

    with torch.no_grad():
        # pred_jts holds normalized coordinates in [-0.5, 0.5].
        pred_jts, _ = _integral_tensor(preds, num_joints, output_3d, hm_width, hm_height, hm_depth, integral_op,
                                       norm_type=norm_type)

    coords = pred_jts.detach().cpu().numpy()
    coords = coords.astype(float)
    if output_3d:
        coords = coords.reshape((coords.shape[0], int(coords.shape[1] / 3), 3))
    else:
        coords = coords.reshape((coords.shape[0], int(coords.shape[1] / 2), 2))
    # Map normalized coordinates back to heatmap pixel space.
    coords[:, :, 0] = (coords[:, :, 0] + 0.5) * hm_width
    coords[:, :, 1] = (coords[:, :, 1] + 0.5) * hm_height

    if output_3d:
        labels = labels.cpu().data.numpy().reshape(preds.shape[0], num_joints, 3)
        label_masks = label_masks.cpu().data.numpy().reshape(preds.shape[0], num_joints, 3)

        labels[:, :, 0] = (labels[:, :, 0] + 0.5) * hm_width
        labels[:, :, 1] = (labels[:, :, 1] + 0.5) * hm_height
        labels[:, :, 2] = (labels[:, :, 2] + 0.5) * hm_depth

        coords[:, :, 2] = (coords[:, :, 2] + 0.5) * hm_depth
    else:
        labels = labels.cpu().data.numpy().reshape(preds.shape[0], num_joints, 2)
        label_masks = label_masks.cpu().data.numpy().reshape(preds.shape[0], num_joints, 2)

        labels[:, :, 0] = (labels[:, :, 0] + 0.5) * hm_width
        labels[:, :, 1] = (labels[:, :, 1] + 0.5) * hm_height

    # Masked-out joints collapse to (0, 0) and are then skipped by
    # calc_dist's "> 1" validity check.
    coords = coords * label_masks
    labels = labels * label_masks

    # Distances normalized by 1/10 of the heatmap resolution.
    if output_3d:
        norm = np.ones((preds.shape[0], 3)) * np.array([hm_width, hm_height, hm_depth]) / 10
    else:
        norm = np.ones((preds.shape[0], 2)) * np.array([hm_width, hm_height]) / 10

    dists = calc_dist(coords, labels, norm)

    acc = 0
    sum_acc = 0
    cnt = 0
    for i in range(num_joints):
        acc = dist_acc(dists[i])
        if acc >= 0:  # -1 means this joint had no valid labels
            sum_acc += acc
            cnt += 1

    if cnt > 0:
        return sum_acc / cnt
    else:
        return 0


def calc_dist(preds, target, normalize):
    """Per-joint normalized distances, shape (num_joints, num_samples).

    Entries are -1 for joints whose target coordinates do not both exceed 1
    (treated as unlabeled).
    """
    preds = preds.astype(np.float32)
    target = target.astype(np.float32)
    n_samples = preds.shape[0]
    n_joints = preds.shape[1]
    dists = np.zeros((n_joints, n_samples))

    for s in range(n_samples):
        for j in range(n_joints):
            # Joints at or below coordinate 1 are considered unlabeled.
            if target[s, j, 0] > 1 and target[s, j, 1] > 1:
                scaled_pred = preds[s, j, :] / normalize[s]
                scaled_tgt = target[s, j, :] / normalize[s]
                dists[j, s] = np.linalg.norm(scaled_pred - scaled_tgt)
            else:
                dists[j, s] = -1

    return dists


def dist_acc(dists, thr=0.5):
    """Fraction of valid distances below ``thr``; -1 when nothing is valid.

    Entries equal to -1 mark unlabeled joints and are excluded.
    """
    valid = np.not_equal(dists, -1)
    n_valid = valid.sum()
    if n_valid == 0:
        return -1
    return np.less(dists[valid], thr).sum() * 1.0 / n_valid


def calculate_precision(tp, fp):
    """Precision tp / (tp + fp); 0 when there are no detections at all."""
    detections = tp + fp
    if detections > 0:
        return tp / detections
    return 0


# Compute recall
def calculate_recall(tp, fn):
    """Recall tp / (tp + fn); 0 when there are no positives to recover."""
    positives = tp + fn
    if positives > 0:
        return tp / positives
    return 0


def calculate_ap(oks_scores, gt_keypoints, pred_keypoints, gt_visibilities, oks_thresh=0.5):
    """Compute average precision (AP) from per-keypoint OKS scores.

    :param oks_scores: OKS score for every predicted keypoint (flattened over images)
    :param gt_keypoints: list of ground-truth keypoint arrays, each of shape
        (num_keypoints, 2); only its length (image count) is read here
    :param pred_keypoints: list of predicted keypoint arrays, each of shape
        (num_keypoints, 2); unused, kept for interface compatibility
    :param gt_visibilities: list of visibility arrays, each of shape (num_keypoints,)
    :param oks_thresh: OKS threshold deciding whether a detection is a true positive
    :return: AP, the trapezoidal area under the precision-recall curve
    """
    sorted_indices = np.argsort(oks_scores)[::-1]  # highest OKS first
    num_images = len(gt_keypoints)
    # The total number of visible ground-truth keypoints is loop-invariant;
    # hoisted out of the loop (the original recomputed it per detection).
    total_visible_gt = np.sum([np.sum(gt_visibilities[i] > 0) for i in range(num_images)])
    precision_list = []
    recall_list = []
    tp_cumulative = 0
    fp_cumulative = 0
    for index in sorted_indices:
        if oks_scores[index] >= oks_thresh:
            tp_cumulative += 1
        else:
            fp_cumulative += 1
        fn_cumulative = total_visible_gt - tp_cumulative
        precision_list.append(calculate_precision(tp_cumulative, fp_cumulative))
        recall_list.append(calculate_recall(tp_cumulative, fn_cumulative))
    # Trapezoidal integration of the precision-recall curve.
    ap = 0
    for i in range(len(recall_list) - 1):
        ap += (recall_list[i + 1] - recall_list[i]) * (precision_list[i + 1] + precision_list[i]) / 2
    return ap



def get_K(fx=24440.6, fy=24440.6, width=1280, height=720):
    """Build a pinhole camera intrinsic matrix.

    Generalizes the previously hard-coded camera: focal lengths (fx, fy)
    in pixels and image size (width, height) are now parameters whose
    defaults reproduce the original constants.  The principal point is
    placed at the image centre.

    :return: (3, 3) float tensor K.
    """
    u = width / 2
    v = height / 2
    K = torch.tensor(
        [[fx, 0, u],
         [0, fy, v],
         [0, 0, 1]],
        dtype=torch.float)
    return K


def pnp(pts2d, pts3d, K):
    """Solve a batch of PnP problems with OpenCV and return [R | t] poses.

    pts2d: (bs, n, 2) tensor of image points, one set per batch element.
    pts3d: 3D model points; only pts3d[0] is passed to solvePnP, so this is
        presumably shaped (1, n, 3) — TODO confirm against callers.
    K: (3, 3) camera intrinsic matrix.

    Returns a (bs, 3, 4) tensor where each slice i is the pose matrix
    [R | t] recovered for batch element i.
    """
    bs = pts2d.size(0)
    n = pts2d.size(1)
    device = pts2d.device
    pts3d_np = np.array(pts3d)
    K_np = np.array(K)
    P_6d = torch.zeros(bs, 3, 4, device=device)
    # 180-degree flip (-I) used below to move a solution from behind the camera.
    R_inv = torch.tensor([[-1, 0, 0], [0, -1, 0], [0, 0, -1]], device=device, dtype=torch.float)
    for i in range(bs):
        pts2d_i_np = np.ascontiguousarray(pts2d[i].cpu()).reshape((n, 1, 2))
        # _, rvec, T, _ = cv.solvePnPRansac(objectPoints=pts3d_np, imagePoints=pts2d_i_np, cameraMatrix=K_np, distCoeffs=None, flags=cv.SOLVEPNP_ITERATIVE, useExtrinsicGuess=True)
        retval, rvec, T = cv.solvePnP(objectPoints=pts3d_np[0], imagePoints=pts2d_i_np[:, 0, ], cameraMatrix=K_np,
                                      distCoeffs=None)
        angle_axis = torch.tensor(rvec, device=device, dtype=torch.float).view(1, 3)
        T = torch.tensor(T, device=device, dtype=torch.float).view(1, 3)
        if T[0, 2] < 0:
            # Negative depth: mirror the pose through the origin, then rebuild
            # the axis-angle representation via quaternions (rowan) from the
            # flipped rotation matrix.
            RR = kn.geometry.axis_angle_to_rotation_matrix(angle_axis)
            RR = R_inv.matmul(RR)
            RR = rowan.from_matrix(RR.cpu(), require_orthogonal=False)
            ax = rowan.to_axis_angle(RR)
            angle_axis = torch.tensor(ax[0] * ax[1], device=device, dtype=torch.float).view(1, 3)
            T = R_inv.matmul(T.t()).t()
        # cv.Rodrigues converts the axis-angle vector into a 3x3 rotation matrix.
        # NOTE(review): .numpy() assumes angle_axis is on CPU — this would raise
        # for CUDA inputs; confirm callers only pass CPU tensors here.
        angle_axis = torch.tensor(cv.Rodrigues(angle_axis.numpy())[0])
        P_6d[i, :] = torch.cat((angle_axis, T.t()), dim=-1)

    return P_6d


def get_distance(cfg, objid=None):
    """Return the diameter of object ``objid`` (in cm) from the LM model info.

    Bug fixes vs. the original: ``json`` was used without being imported and
    ``objid`` was an undefined global — both raised NameError on every call.
    ``objid`` is now an explicit (backward-compatibly added) parameter.

    :param cfg: config object providing ``LM_DIR``, the dataset root.
    :param objid: object id used as the key into models_info.json.
    :return: 1-element float tensor with the diameter.
    """
    if objid is None:
        raise ValueError('objid must be provided')

    with open(os.path.join(cfg.LM_DIR, 'lm_models/models/models_info.json')) as f:
        models_info = json.load(f)
    # 0.1 factor: models_info diameters are presumably in mm, converted to cm
    # — TODO confirm against the dataset documentation.
    diameter = 0.1 * torch.tensor(models_info[str(objid)]['diameter']).view(1)
    assert diameter.size()[0] == 1
    return diameter


def ADD_accuracy(P, pts3d_h, PM_gt, diameter, P_is_matrix=False):
    """ADD metric: count poses whose mean 3D point distance to the
    ground-truth pose stays below half the object diameter.

    Returns (n_correct, batch_size, per-sample correctness flags of shape
    (bs, 1)).
    """
    batch = P.size(0)
    if P_is_matrix:
        PM = P
    else:
        # Convert axis-angle + translation (bs, 6) into a (bs, 3, 4) pose.
        rot = kn.angle_axis_to_rotation_matrix(P[:, 0:3].view(batch, 3))
        PM = torch.cat((rot[:, 0:3, 0:3], P[:, 3:6].view(batch, 3, 1)), dim=-1)
    cam_pts = pts3d_h.matmul(PM.transpose(1, 2))
    cam_pts_gt = pts3d_h.matmul(PM_gt.transpose(1, 2))
    mean_dist = (cam_pts - cam_pts_gt).norm(p=2, dim=2).mean(dim=1)
    hits = (mean_dist < 0.5 * diameter).float()
    return hits.sum().item(), batch, hits.view(batch, 1)


def batch_project(P, pts3d, K, angle_axis=True):
    """Project 3D model points into the image for a batch of poses.

    P is either (bs, 6) axis-angle + translation (angle_axis=True) or a
    (bs, 3, 4) pose matrix.  Returns (bs, n, 2) pixel coordinates.
    """
    num_pts = pts3d.size(0)
    batch = P.size(0)
    device = P.device
    # Homogeneous coordinates: (n, 4).
    pts_h = torch.cat((pts3d, torch.ones(num_pts, 1, device=device)), dim=-1)
    if angle_axis:
        rot = kn.angle_axis_to_rotation_matrix(P[:, 0:3].view(batch, 3))
        PM = torch.cat((rot[:, 0:3, 0:3], P[:, 3:6].view(batch, 3, 1)), dim=-1)
    else:
        PM = P
    cam_pts = pts_h.matmul(PM.transpose(-2, -1))
    proj = cam_pts.matmul(K.t())
    depth = proj[:, :, 2].view(batch, num_pts, 1)
    depth[depth == 0] = depth[depth == 0] + 1e-12  # guard the perspective divide
    return proj[:, :, 0:2].div(depth)

def angle_error_mat(R1, R2):
    """Angle in degrees between two rotation matrices."""
    cos_angle = 0.5 * (np.trace(np.dot(R1.T, R2)) - 1)
    cos_angle = np.clip(cos_angle, -1.0, 1.0)  # numerical drift can leave [-1, 1]
    return np.rad2deg(np.abs(np.arccos(cos_angle)))


def angle_error_vec(v1, v2):
    """Angle in degrees between two vectors."""
    denom = np.linalg.norm(v1) * np.linalg.norm(v2)
    cos_angle = np.clip(np.dot(v1, v2) / denom, -1.0, 1.0)
    return np.rad2deg(np.arccos(cos_angle))

def compute_pose_error(T_0to1, R, t):
    """Angular rotation and translation errors (degrees) of (R, t) against T_0to1."""
    gt_R = T_0to1[:3, :3]
    gt_t = T_0to1[:3, 3]
    t_err = angle_error_vec(t.squeeze(), gt_t)
    # The translation direction from an essential matrix is sign-ambiguous.
    t_err = np.minimum(t_err, 180 - t_err)
    R_err = angle_error_mat(R, gt_R)
    return t_err, R_err

def pose_auc(errors, thresholds):
    """Area under the recall-vs-error curve, one value per threshold.

    Errors are sorted ascending; recall is the fraction of samples whose
    error falls below a given value.  Each AUC is normalized by its
    threshold so a perfect result is 1.0.

    :param errors: iterable of per-sample pose errors.
    :param thresholds: iterable of error thresholds to integrate up to.
    :return: list of AUC values, one per threshold.
    """
    # np.trapz was removed in NumPy 2.0 in favour of np.trapezoid; support both.
    trapezoid = getattr(np, 'trapezoid', None) or np.trapz
    sort_idx = np.argsort(errors)
    errors = np.array(errors.copy())[sort_idx]
    recall = (np.arange(len(errors)) + 1) / len(errors)
    # Prepend the (0, 0) point so the curve starts at the origin.
    errors = np.r_[0.0, errors]
    recall = np.r_[0.0, recall]
    aucs = []
    for t in thresholds:
        last_index = np.searchsorted(errors, t)
        # Extend the curve flat out to the threshold itself.
        r = np.r_[recall[:last_index], recall[last_index - 1]]
        e = np.r_[errors[:last_index], t]
        aucs.append(trapezoid(r, x=e) / t)
    return aucs


def PCK_metric(pred, gt, thr):
    """PCK: percentage of correct keypoints within a distance threshold.

    ## params:
    ## pred: [n, k, 2], n is the num of people, k is the number of keypoints
    ## gt:   [n, k, 2]
    ## thr = 0.2*length_body (or thr = 0.5*length_head)

    Returns (per-keypoint mean over people, overall mean).

    Bug fix: the original called an undefined ``cal_distance``; the
    standard Euclidean distance is used instead.
    """
    # Euclidean distance per person/keypoint; correct iff within thr.
    distances = np.linalg.norm(np.asarray(pred, dtype=np.float32) - np.asarray(gt, dtype=np.float32), axis=2)
    results = (distances <= thr).astype(np.float32)

    mean_points = np.mean(results, axis=0)
    mean_all = np.mean(mean_points)
    return mean_points, mean_all
