import numpy as np
import torch
from pytorch3d.transforms import quaternion_to_matrix
from transforms3d.quaternions import quat2mat

def body_obj_to_contact(body, obj):
    """Flag the single body joint closest to the object, if within 0.1 m.

    Args:
        body: (T, B, N_joints, 3) joint positions.
        obj:  (T, B, N_points, 3) object point positions.

    Returns:
        contact: (T, B, N_joints) float tensor. For each (t, b), only the
        joint whose nearest object point is globally closest is set to 1,
        and only when that distance is below the 0.1 m threshold; every
        other entry is 0.
    """
    T, B, N_joints = body.shape[:3]
    contact = torch.zeros((T, B, N_joints), device=body.device)
    # Pairwise joint-to-point distances: (T, B, N_joints, N_points).
    distance_matrix = (body[:, :, :, None] - obj[:, :, None]).norm(dim=4)
    # Distance from each joint to its nearest object point: (T, B, N_joints).
    min_distance = distance_matrix.min(dim=3)[0]
    # Index of the joint closest to the object per (t, b): (T, B, 1).
    min_indices = min_distance.argmin(dim=2, keepdim=True)
    # Vectorized replacement for the original per-(t, b) Python loop:
    # write 1 at the closest joint only when it is within the threshold
    # (writing 0 otherwise is a no-op on the zero-initialized tensor).
    closest_dist = min_distance.gather(2, min_indices)  # (T, B, 1)
    contact.scatter_(2, min_indices, (closest_dist < 0.1).to(contact.dtype))
    return contact

def calc_obj_pred(pose_pred, zero_pose_obj):
    """Transform canonical object points by predicted per-frame poses.

    Args:
        pose_pred: (T, B, 2, 7) poses laid out as [tx, ty, tz, qx, qy, qz, qw].
        zero_pose_obj: (B, 2, N_points, 3) object points in the zero pose.

    Returns:
        obj_pred: (T, B, 2, N_points, 3) transformed object points.
    """
    base_pts = zero_pose_obj.unsqueeze(0)             # (1, B, 2, N_points, 3)
    trans = pose_pred[..., None, :3]                  # (T, B, 2, 1, 3)
    # Reorder quaternion from xyzw to the wxyz layout pytorch3d expects.
    quat_wxyz = torch.cat((pose_pred[..., -1:], pose_pred[..., -4:-1]), dim=-1)
    rot = quaternion_to_matrix(quat_wxyz)             # (T, B, 2, 3, 3)
    # Row-vector rotation: p @ R^T == (R @ p^T)^T; broadcasting supplies
    # the leading T axis for the canonical points.
    rotated = base_pts.matmul(rot.transpose(-1, -2))
    return (rotated + trans).contiguous()             # (T, B, 2, N_points, 3)


# liuyun
def calc_world_pts(pose_pred, zero_pose_obj):
    """Map canonical object points into the world frame per time step.

    Args:
        pose_pred: (T, 7) poses laid out as [tx, ty, tz, qx, qy, qz, qw].
        zero_pose_obj: (N_points, 3) object points in the zero pose.

    Returns:
        (T, N_points, 3) world-space object points.
    """
    pts = zero_pose_obj[None]                         # (1, N_points, 3)
    trans = pose_pred[..., None, :3]                  # (T, 1, 3)
    # Reorder quaternion from xyzw to the wxyz layout pytorch3d expects.
    quat_wxyz = torch.cat((pose_pred[..., -1:], pose_pred[..., -4:-1]), dim=-1)
    rot = quaternion_to_matrix(quat_wxyz)             # (T, 3, 3)
    # Row-vector rotation: p @ R^T == (R @ p^T)^T.
    world = pts.matmul(rot.transpose(-1, -2)) + trans
    return world.contiguous()                         # (T, N_points, 3)


def calc_metric_single(body_pred, body_gt, obj_pred, obj_gt, pose_pred, pose_gt,
                       test_frame_N=10, test_start=10):
    """Compute pose-estimation metrics over frames [test_start, test_start + test_frame_N).

    Args:
        body_pred, body_gt: body joint tensors; last dim must be 3. The
            per-entity "detailed" metrics index dim 2 at 0 and 1, so the
            effective shape is (T, B, 2, N_joints, 3).
            NOTE(review): the original header comment said (T, B, N_joints, 3);
            confirm against callers.
        obj_pred, obj_gt: object point tensors, last dim 3; indexed the same
            way, so presumably (T, B, 2, N_points, 3).
        pose_pred, pose_gt: (T, B, 2, 7) poses, [tx, ty, tz, qx, qy, qz, qw]
            (quaternion stored xyzw; converted to wxyz for quat2mat).
        test_frame_N: number of evaluated frames (default 10).
        test_start: first evaluated frame index (default 10, matching the
            previously hard-coded offset).

    Returns:
        dict with scalar means (mpjpe_h, mpjpe_o, translation_error in input
        units; rotation_error in degrees) and per-entity "_detailed" float32
        arrays of shape (2,).
    """
    assert body_pred.size()[-1] == 3
    assert obj_pred.size()[-1] == 3

    # Evaluation window, hoisted so the frame offset lives in one place
    # instead of being repeated in every expression.
    win = slice(test_start, test_start + test_frame_N)

    def _mean_l2(pred, gt):
        # Mean Euclidean error over the evaluation window.
        return (pred[win] - gt[win]).norm(dim=-1, p=2).mean().item()

    mpjpe_h = _mean_l2(body_pred, body_gt)
    mpjpe_o = _mean_l2(obj_pred, obj_gt)
    translation_error = (pose_pred[win][..., :3] - pose_gt[win][..., :3]).norm(dim=-1, p=2).mean().item()
    translation_error_detailed = np.float32([
        (pose_pred[win, :, k, :3] - pose_gt[win, :, k, :3]).norm(dim=-1, p=2).mean().item()
        for k in range(2)
    ])
    mpjpe_h_detailed = np.float32([
        (body_pred[win, :, k] - body_gt[win, :, k]).norm(dim=-1, p=2).mean().item()
        for k in range(2)
    ])
    mpjpe_o_detailed = np.float32([
        (obj_pred[win, :, k] - obj_gt[win, :, k]).norm(dim=-1, p=2).mean().item()
        for k in range(2)
    ])

    # Rotation error needs per-frame matrices, so stay in a Python loop.
    pose_pred_np = pose_pred.detach().cpu().numpy()
    pose_gt_np = pose_gt.detach().cpu().numpy()
    B = pose_pred_np.shape[1]
    rotation_errors = [[], []]  # one list of per-sample angles per entity
    for frame_idx in range(test_start, test_start + test_frame_N):
        for batch_idx in range(B):
            for k in range(2):
                q_pred = pose_pred_np[frame_idx, batch_idx, k][-4:]  # xyzw
                q_gt = pose_gt_np[frame_idx, batch_idx, k][-4:]      # xyzw
                # quat2mat expects wxyz ordering.
                R_pred = quat2mat([q_pred[3], q_pred[0], q_pred[1], q_pred[2]])
                R_gt = quat2mat([q_gt[3], q_gt[0], q_gt[1], q_gt[2]])
                # Geodesic rotation distance in degrees; clip guards the
                # arccos domain against floating-point drift.
                angle = np.arccos(((np.trace(R_pred @ R_gt.T) - 1) / 2).clip(-1, 1)) / np.pi * 180
                rotation_errors[k].append(angle)

    rotation_error_detailed = np.float32([np.float32(errs).mean() for errs in rotation_errors])
    rotation_error = rotation_error_detailed.mean()

    return dict(
        mpjpe_h=mpjpe_h,
        mpjpe_o=mpjpe_o,
        translation_error=translation_error,
        rotation_error=rotation_error,
        mpjpe_h_detailed=mpjpe_h_detailed,
        mpjpe_o_detailed=mpjpe_o_detailed,
        translation_error_detailed=translation_error_detailed,
        rotation_error_detailed=rotation_error_detailed,
    )
