import numpy as np
# import utils
import dataset.utils as utils
from tqdm import tqdm
import torch
import torchgeometry
# import torch.nn.functional as F
import cv2
from pathlib import Path

# Dead code kept for reference: quaternion/rotation-matrix conversion helpers
# (adapted from pytorch3d's transforms), disabled by wrapping them in a
# module-level string literal. NOTE(review): if re-enabled,
# matrix_to_quaternion uses `F` (torch.nn.functional), whose import is
# commented out at the top of this file.
'''
def quaternion_to_axis_angle(quaternions):
    """
    Convert rotations given as quaternions to axis/angle.

    Args:
        quaternions: quaternions with real part first,
            as tensor of shape (..., 4).

    Returns:
        Rotations given as a vector in axis angle form, as a tensor
            of shape (..., 3), where the magnitude is the angle
            turned anticlockwise in radians around the vector's
            direction.
    """
    norms = torch.norm(quaternions[..., 1:], p=2, dim=-1, keepdim=True)
    half_angles = torch.atan2(norms, quaternions[..., :1])
    angles = 2 * half_angles
    eps = 1e-6
    small_angles = angles.abs() < eps
    sin_half_angles_over_angles = torch.empty_like(angles)
    sin_half_angles_over_angles[~small_angles] = (
        torch.sin(half_angles[~small_angles]) / angles[~small_angles]
    )
    # for x small, sin(x/2) is about x/2 - (x/2)^3/6
    # so sin(x/2)/x is about 1/2 - (x*x)/48
    sin_half_angles_over_angles[small_angles] = (
        0.5 - (angles[small_angles] * angles[small_angles]) / 48
    )
    return quaternions[..., 1:] / sin_half_angles_over_angles


def quaternion_to_matrix(quaternions):
    """
    Convert rotations given as quaternions to rotation matrices.

    Args:
        quaternions: quaternions with real part first,
            as tensor of shape (..., 4).

    Returns:
        Rotation matrices as tensor of shape (..., 3, 3).
    """
    r, i, j, k = torch.unbind(quaternions, -1)
    two_s = 2.0 / (quaternions * quaternions).sum(-1)

    o = torch.stack(
        (
            1 - two_s * (j * j + k * k),
            two_s * (i * j - k * r),
            two_s * (i * k + j * r),
            two_s * (i * j + k * r),
            1 - two_s * (i * i + k * k),
            two_s * (j * k - i * r),
            two_s * (i * k - j * r),
            two_s * (j * k + i * r),
            1 - two_s * (i * i + j * j),
        ),
        -1,
    )
    return o.reshape(quaternions.shape[:-1] + (3, 3))


def _sqrt_positive_part(x: torch.Tensor) -> torch.Tensor:
    """
    Returns torch.sqrt(torch.max(0, x))
    but with a zero subgradient where x is 0.
    """
    ret = torch.zeros_like(x)
    positive_mask = x > 0
    ret[positive_mask] = torch.sqrt(x[positive_mask])
    return ret


def matrix_to_quaternion(matrix: torch.Tensor) -> torch.Tensor:
    """
    Convert rotations given as rotation matrices to quaternions.

    Args:
        matrix: Rotation matrices as tensor of shape (..., 3, 3).

    Returns:
        quaternions with real part first, as tensor of shape (..., 4).
    """
    if matrix.size(-1) != 3 or matrix.size(-2) != 3:
        raise ValueError(f"Invalid rotation matrix  shape f{matrix.shape}.")

    batch_dim = matrix.shape[:-2]
    m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.unbind(
        matrix.reshape(*batch_dim, 9), dim=-1
    )

    q_abs = _sqrt_positive_part(
        torch.stack(
            [
                1.0 + m00 + m11 + m22,
                1.0 + m00 - m11 - m22,
                1.0 - m00 + m11 - m22,
                1.0 - m00 - m11 + m22,
            ],
            dim=-1,
        )
    )

    # we produce the desired quaternion multiplied by each of r, i, j, k
    quat_by_rijk = torch.stack(
        [
            torch.stack([q_abs[..., 0] ** 2, m21 - m12, m02 - m20, m10 - m01], dim=-1),
            torch.stack([m21 - m12, q_abs[..., 1] ** 2, m10 + m01, m02 + m20], dim=-1),
            torch.stack([m02 - m20, m10 + m01, q_abs[..., 2] ** 2, m12 + m21], dim=-1),
            torch.stack([m10 - m01, m20 + m02, m21 + m12, q_abs[..., 3] ** 2], dim=-1),
        ],
        dim=-2,
    )

    # We floor here at 0.1 but the exact level is not important; if q_abs is small,
    # the candidate won't be picked.
    # pyre-ignore [16]: `torch.Tensor` has no attribute `new_tensor`.
    quat_candidates = quat_by_rijk / (2.0 * q_abs[..., None].max(q_abs.new_tensor(0.1)))

    # if not for numerical problems, quat_candidates[i] should be same (up to a sign),
    # forall i; we pick the best-conditioned one (with the largest denominator)

    return quat_candidates[
        F.one_hot(q_abs.argmax(dim=-1), num_classes=4) > 0.5, :  # pyre-ignore[16]
    ].reshape(*batch_dim, 4)
    
    
def rotate_orient(global_orients):
    R_x90 = np.array(((1,0,0),(0,0,-1),(0,1,0)))

    global_quats = torchgeometry.angle_axis_to_quaternion(global_orients)
    rotmatrixs = quaternion_to_matrix(global_quats)
    rotmatrixs_new = np.matmul(rotmatrixs, R_x90)
    quats_new = matrix_to_quaternion(rotmatrixs_new)
    axis_new = quaternion_to_axis_angle(quats_new)
    return axis_new
'''

class Anim(object):
    """
    A very basic animation object:
        holds the data extracted from an AMASS npz file and its
        corresponding rendered mp4 file (see read_data).
    """

    # Joint names for the 22-joint body skeleton (hands excluded).
    JOINTS = (
        "Hips", #0
        "Left Upper Leg", #1
        "Right Upper Leg", #2
        "Spine", #3
        "Left Leg", #4
        "Right Leg", #5
        "Spine1", #6
        "Left Foot", #7
        "Right Foot", #8
        "Thorax", #9
        "Left Toe", #10
        "Right Toe", #11
        "Neck", #12
        "Left Shoulder", #13
        "Right Shoulder", #14
        "Head", #15
        "Left ForeArm", #16
        "Right ForeArm", #17
        "Left Arm", #18
        "Right Arm", #19
        "Left Hand", #20
        "Right Hand", #21
    )

    # Parent joint index for each joint in JOINTS; -1 marks the root (Hips).
    PARENTS = (
        -1, 
        0, 
        0, 
        0, 
        1, 
        2, 
        3, 
        4, 
        5, 
        6, 
        7, 
        8, 
        9, 
        12, 
        12, 
        12, 
        13, 
        14, 
        16, 
        17, 
        18, 
        19,
    )

    # Rest-pose offset of each joint relative to its parent, (x, y, z) in meters
    # (presumably — units not stated in SOURCE; confirm against the mocap data).
    OFFSETS = (
        (0.000000, 0.000000, 0.000000),
        (0.062035, -0.079765, -0.016749),
        (-0.064593, -0.087580, -0.011613),
        (0.004544, 0.119676, -0.043130),
        (0.041399, -0.378502, 0.002566),
        (-0.039931, -0.377208, -0.010048),
        (0.003125, 0.124274, 0.021492),
        (-0.015613, -0.422745, -0.039205),
        (0.019047, -0.415820, -0.036255),
        (-0.002173, 0.050983, 0.003789),
        (0.035050, -0.058445, 0.121267),
        (-0.035788, -0.060426, 0.127937),
        (-0.014696, 0.206942, -0.036949),
        (0.071055, 0.112063, -0.015878),
        (-0.081248, 0.111037, -0.022370),
        (0.010241, 0.081782, 0.045505),
        (0.125092, 0.041551, -0.021082),
        (-0.113054, 0.044423, -0.010502),
        (0.238778, -0.013728, -0.025897),
        (-0.244392, -0.011038, -0.036114),
        (0.257455, 0.012466, -0.004861),
        (-0.256492, 0.002269, -0.005019),
    )

    def __init__(self, video, index, axis, pos, quats, betas=None):
        """
        :param video: pose images from video, (video_nframes, height, width, channels=3)
        :param index: video frame index for each mocap frame, (mocap_nframes, )
        :param axis: axis-angle rotations, (mocap_nframes, 24*3)
        :param pos: local joint positions, (mocap_nframes, njoints, 3)
        :param quats: local quaternions, (mocap_nframes, 22, 4)
        :param betas: optional SMPL shape coefficients; None when unavailable.
            (Added with a default so existing 5-argument callers keep working;
            get_amass_dataset reads anim.betas and previously crashed because
            this attribute was never set.)
        """
        self.video = video
        self.index = index
        self.axis = axis
        self.pos = pos
        self.quats = quats
        self.betas = betas

        
def read_data(video_path, npz_path):
    """
    Reads an mp4 video and extracts the matching npz animation information.

    :param video_path: video filename, Path
    :param npz_path: npz filename, Path
    :return: A simple Anim object containing the extracted information.
    :raises IOError: if either file does not exist, or no frame can be
        decoded from the video.
    """

    if (not npz_path.exists()) or (not video_path.exists()):
        # One informative error instead of debug prints + a generic message.
        raise IOError(
            f"File path does not exist: npz={npz_path}, video={video_path}"
        )

    # pose
    npz_file = np.load(npz_path)
    trans = npz_file['trans']
    poses = npz_file['poses']
    axis = poses[:, :72]  # 24 joints, axis-angle, (mocap_nframes, 72)

    # video: decode all frames; release the capture handle even on error
    # (the original leaked the cv2.VideoCapture).
    video = []
    capture = cv2.VideoCapture(str(video_path))
    try:
        rval = capture.isOpened()
        while rval:
            rval, frame = capture.read()
            if rval:
                video.append(frame)
    finally:
        capture.release()
    video = np.asarray(video)
    if len(video) == 0:
        # Previously surfaced as an opaque ZeroDivisionError below.
        raise IOError(f"No frames could be decoded from video: {video_path}")

    # index: map every mocap frame to its (floor) video frame.
    mocap_nframes = poses.shape[0]
    video_nframes = video.shape[0]
    ratio = mocap_nframes / video_nframes
    # int64 instead of int16: int16 silently overflows for sequences
    # longer than 32767 mocap frames.
    index = (np.arange(mocap_nframes) / ratio).astype(np.int64)

    # position: rest-pose offsets repeated per frame, with the root row
    # replaced by the global translation.
    pos = np.array(Anim.OFFSETS)[np.newaxis].repeat(mocap_nframes, axis=0)
    pos[:, 0:1] = trans[:, np.newaxis, :]

    # quaternion: first 22 joints only (the 2 hand joints are dropped).
    axis_quats = torch.from_numpy(poses[:, :66].reshape(mocap_nframes, -1, 3))
    quats = torchgeometry.angle_axis_to_quaternion(axis_quats).numpy()
    quats = utils.remove_quat_discontinuities(quats)

    anim = Anim(video, index, axis, pos, quats)
    # Attach SMPL shape coefficients when the npz provides them:
    # get_amass_dataset reads anim.betas, which was never populated before.
    if 'betas' in npz_file:
        anim.betas = npz_file['betas']
    return anim


def get_amass_dataset(data_path, mode, window=200, offset=80):
    """
    Builds sliding-window training samples from the AMASS sketch-render data.

    :param data_path: dataset root containing 'SketchRenders' (videos) and
        'SMPLX' (mocap npz files) directories
    :param mode: 'train' (first 85% of files), 'valid' (last 15%) or
        'test' (all files)
    :param window: sliding-window length in mocap frames
    :param offset: stride between consecutive windows
    :return: tuple (B, A, I, X, Q) of numpy arrays — shape betas, boundary
        axis-angles, boundary video frames, positions, quaternions
    :raises AttributeError: if mode is not 'train'/'valid'/'test'
        (type kept for existing callers)
    """
    A = []  # Axis angle (first and last frame of each window)
    B = []  # Shape betas
    I = []  # Only the first and last frames of the sequence are stored
    X = []  # Root position
    Q = []  # Quaternion

    dir_to_sketch = Path(data_path) / 'SketchRenders'
    files_sketch = utils.list_all_files(dir_to_sketch)

    # 85/15 train/valid split over the file list; 'test' keeps everything.
    ratio_index = int(len(files_sketch) * 0.85)
    if mode == "train":
        files_sketch = files_sketch[0:ratio_index]
    elif mode == "valid":
        files_sketch = files_sketch[ratio_index:]
    elif mode == "test":
        pass
    else:
        raise AttributeError(f"Unknown dataset mode: {mode!r}")

    for video_path in tqdm(files_sketch):
        # The matching mocap file lives under SMPLX with a '_stageii.npz' suffix.
        npz_path = video_path.replace('SketchRenders', 'SMPLX')
        npz_path = npz_path.replace('.mp4', '_stageii.npz')
        anim = read_data(Path(video_path), Path(npz_path))

        # Sliding windows over the mocap sequence.
        i = 0
        while i + window < anim.pos.shape[0]:
            # NOTE(review): Anim.__init__ as originally written never sets
            # `betas`; this relies on read_data attaching it — confirm.
            B.append([anim.betas[:10]])
            A.append([[anim.axis[i]], [anim.axis[i + window]]])
            I.append(anim.video[[anim.index[i], anim.index[i + window]]])
            X.append(anim.pos[i: i + window])
            Q.append(anim.quats[i: i + window])
            i += offset

    B = np.asarray(B).astype(np.float32)
    A = np.asarray(A).astype(np.float32)
    X = np.asarray(X)
    Q = np.asarray(Q)
    I = np.asarray(I)

    return B, A, I, X, Q

def update_amass_dataset(X, Q):
    """
    Post-processes windowed sequences: extracts foot contacts, re-centers
    each sequence around XZ = 0, and unifies facing on the last seed frame.

    :param X: joint positions, (nwindows, window, njoints, 3) numpy array
        (as produced by get_amass_dataset)
    :param Q: local quaternions, (nwindows, window, njoints, 4) numpy array
    :return: (X, Q, contacts_l, contacts_r, parents)
    """
    npast = 10  # number of seed frames; facing is unified at frame npast - 1
    contacts_l = []
    contacts_r = []
    for i in range(X.shape[0]):
        # Forward kinematics to global space, then threshold foot velocities.
        q, x = utils.quat_fk(Q[i], X[i], Anim.PARENTS)
        c_l, c_r = utils.extract_feet_contacts(x, [7, 10], [8, 11], velfactor=0.2)
        contacts_l.append(c_l)
        contacts_r.append(c_r)

    contacts_l = np.asarray(contacts_l)
    contacts_r = np.asarray(contacts_r)

    # Center each sequence around XZ = 0 using the mean root X/Z over time.
    # Fixed: the original called torch.mean(..., dim=...) on these numpy
    # arrays (get_amass_dataset returns np.asarray results), which raises
    # a TypeError — numpy does not accept the `dim` keyword.
    xzs = np.mean(X[:, :, 0, ::2], axis=1, keepdims=True)
    X[:, :, 0, 0] = X[:, :, 0, 0] - xzs[..., 0]
    X[:, :, 0, 2] = X[:, :, 0, 2] - xzs[..., 1]

    # Unify facing on last seed frame
    X, Q = utils.rotate_at_frame(X, Q, Anim.PARENTS, n_past=npast)

    return X, Q, contacts_l, contacts_r, Anim.PARENTS