import torch
from torch.nn import functional as F

def batch_quaternion_rotate(coords, quaternions):
  """Rotate a batch of 3-D points by per-point quaternions.

  Args:
    coords (Tensor): (n, 3) points, or (b, m, 3) for a batched call.
    quaternions (Tensor): (n, 4) or (b, m, 4) quaternions in (w, x, y, z)
      order, matched one-to-one with the points.

  Returns:
    Tensor: rotated points with the same shape as ``coords``.
  """
  # Work in float32 regardless of the incoming dtype.
  coords = coords.float()
  quaternions = quaternions.float()

  batched = coords.dim() == 3 and quaternions.dim() == 3
  if batched:
    batch = coords.shape[0]
    assert batch == quaternions.shape[0]
    # Flatten the leading batch dim; restored before returning.
    quaternions = quaternions.reshape(-1, 4)
    coords = coords.reshape(-1, 3)

  # A rotation quaternion must be unit length, so normalize first.
  unit_q = quaternions / torch.norm(quaternions, dim=1, keepdim=True)
  w, x, y, z = unit_q.unbind(dim=1)

  # Shared products for the rotation-matrix entries.
  xx, yy, zz = x ** 2, y ** 2, z ** 2
  xy, xz, yz = x * y, x * z, y * z
  xw, yw, zw = x * w, y * w, z * w

  # Row-major rotation matrix per quaternion, shape (n, 3, 3).
  row0 = torch.stack([1 - 2 * yy - 2 * zz, 2 * xy - 2 * zw, 2 * xz + 2 * yw], dim=1)
  row1 = torch.stack([2 * xy + 2 * zw, 1 - 2 * xx - 2 * zz, 2 * yz - 2 * xw], dim=1)
  row2 = torch.stack([2 * xz - 2 * yw, 2 * yz + 2 * xw, 1 - 2 * xx - 2 * yy], dim=1)
  rot_mats = torch.stack([row0, row1, row2], dim=1)

  # Batched matrix-vector product: (n, 3, 3) @ (n, 3, 1) -> (n, 3, 1),
  # then drop the trailing singleton axis.
  rotated = torch.matmul(rot_mats, coords.unsqueeze(-1)).squeeze(-1)

  if batched:
    rotated = rotated.reshape(batch, -1, 3)
  return rotated
  
def get_inverse_quaternion(quaternion):
  """Return the inverse rotation of a unit quaternion.

  For a unit quaternion the inverse equals the conjugate: negate the
  vector part (x, y, z) and keep the scalar part (w).

  Args:
    quaternion (Tensor): (..., 4) quaternion(s) in (w, x, y, z) order;
      a single (4,) quaternion is also accepted.

  Returns:
    Tensor: conjugate quaternion(s), same shape as the input.
  """
  inverse_quaternion = quaternion.clone()
  # Index the last axis explicitly so batched (n, 4) inputs work too;
  # the previous `[1:]` slice wrongly negated whole rows of a batch.
  inverse_quaternion[..., 1:] *= -1
  return inverse_quaternion
  
def quaternion_multiply(quat_next, quat_prev):
  """Compose rotations via the Hamilton product ``quat_next * quat_prev``.

  Applying the returned quaternion is equivalent to rotating by
  ``quat_prev`` first and then by ``quat_next``.

  Args:
    quat_next (Tensor): (..., 4) quaternion(s) in (w, x, y, z) order.
    quat_prev (Tensor): (..., 4) quaternion(s) in (w, x, y, z) order.

  Returns:
    Tensor: (..., 4) product quaternion(s).
  """
  # Unbind along the LAST axis so both (4,) and batched (..., 4) inputs
  # work; the previous unbind(dim=1) required exactly 2-D input.
  wn, xn, yn, zn = quat_next.unbind(dim=-1)
  wp, xp, yp, zp = quat_prev.unbind(dim=-1)
  w = wn * wp - xn * xp - yn * yp - zn * zp
  x = wn * xp + xn * wp + yn * zp - zn * yp
  y = wn * yp - xn * zp + yn * wp + zn * xp
  z = wn * zp + xn * yp - yn * xp + zn * wp
  return torch.stack([w, x, y, z], dim=-1)

def compute_v_normals(verts, faces):
  """Area-weighted unit vertex normals for a batch of triangle meshes.

  Args:
    verts (Tensor): (N, V, 3) batch of vertex positions; the leading batch
      dim is required by the scatter below.
    faces (Tensor): (F, 3) integer triangle indices, shared across the batch.

  Returns:
    Tensor: (N, V, 3) normalized vertex normals.
  """
  corner_idx = [faces[..., c].long() for c in range(3)]
  v0, v1, v2 = (verts[..., idx, :] for idx in corner_idx)

  # Un-normalized face normals; the magnitude is twice the triangle area,
  # so larger faces weigh more in the per-vertex accumulation.
  face_normals = torch.cross(v1 - v0, v2 - v0, dim=-1)

  batch = verts.shape[0]
  v_normals = torch.zeros_like(verts)
  # Accumulate every face normal onto each of its three corner vertices.
  for idx in corner_idx:
    v_normals.scatter_add_(1, idx[..., None].repeat(batch, 1, 3), face_normals)
  return F.normalize(v_normals, dim=-1)

def compute_normals(verts, faces):
  """Un-normalized face normals of a triangle mesh.

  Args:
    verts (Tensor): (..., V, 3) vertex positions.
    faces (Tensor): (F, 3) integer triangle indices.

  Returns:
    Tensor: (..., F, 3) cross products (v1 - v0) x (v2 - v0); each row's
    magnitude is twice the corresponding triangle's area.
  """
  c0, c1, c2 = (verts[..., faces[..., k].long(), :] for k in range(3))
  edge_a = c1 - c0
  edge_b = c2 - c0
  return torch.cross(edge_a, edge_b, dim=-1)

  
def comput_v_rotate_quat(v1, v0=None, epsilon=1e-7):
  """Quaternion (w, x, y, z) that rotates direction ``v0`` onto ``v1``.

  Args:
    v1 (Tensor): (..., 3) target directions (need not be normalized).
    v0 (Tensor, optional): (..., 3) source directions; defaults to +z.
    epsilon (float): guards the normalizations and the final division.

  Returns:
    Tensor: (..., 4) quaternions.

  NOTE(review): when v0 and v1 are anti-parallel (dot == -1) the rotation
  axis is undefined and the result degenerates toward zero — callers
  should avoid that configuration.
  """
  if v0 is None:
    # Default reference direction is the +z axis. Plain assignment replaces
    # the old `.data` mutation, which bypasses autograd bookkeeping.
    v0 = torch.zeros_like(v1)
    v0[..., 2] = 1.
  v0 = v0 / (torch.linalg.norm(v0, dim=-1, keepdim=True) + epsilon)
  v1 = v1 / (torch.linalg.norm(v1, dim=-1, keepdim=True) + epsilon)

  dot = torch.sum(v0 * v1, dim=-1, keepdim=True)
  cross = torch.cross(v0, v1, dim=-1)

  # Half-angle identities: w = cos(theta/2) = sqrt((1 + dot) / 2), and the
  # vector part is axis * sin(theta/2) = cross / sqrt(2 * (1 + dot)).
  w = torch.sqrt(torch.clamp(0.5 * (1 + dot), min=0.0))
  denom = torch.sqrt(torch.clamp(2.0 * (1 + dot), min=epsilon)) + epsilon
  xyz = cross / denom
  return torch.cat([w, xyz], dim=-1)


# %% build sigma
def build_quaternion(matrix: torch.Tensor) -> torch.Tensor:
  r"""
  Copied from https://github.com/naver/roma
  Converts rotation matrix to unit quaternion representation.

  Args:
      R (Nx3x3 tensor): batch of rotation matrices.
  Returns:
      batch of unit quaternions (...x4 tensor, XYZW convention).

  NOTE(review): the output is scalar-LAST (x, y, z, w), matching the
  roma/SciPy source, while the other helpers in this file
  (build_rotation, batch_quaternion_rotate, quaternion_multiply) use
  scalar-first (w, x, y, z) — reorder components before chaining them.
  TODO confirm against callers.
  """
  num_rotations, D1, D2 = matrix.shape
  assert((D1, D2) == (3,3)), "Input should be a Bx3x3 tensor."

  # Adapted from SciPy:
  # https://github.com/scipy/scipy/blob/7cb3d751756907238996502b92709dc45e1c6596/scipy/spatial/transform/rotation.py#L480

  # Per-matrix decision values: the three diagonal entries plus the trace.
  # The largest picks the numerically safest extraction branch below.
  decision_matrix = torch.empty((num_rotations, 4), dtype=matrix.dtype, device=matrix.device)
  decision_matrix[:, :3] = matrix.diagonal(dim1=1, dim2=2)
  decision_matrix[:, -1] = decision_matrix[:, :3].sum(axis=1)
  choices = decision_matrix.argmax(axis=1)

  quat = torch.empty((num_rotations, 4), dtype=matrix.dtype, device=matrix.device)

  # Branch 1: a diagonal entry dominates (choices in {0, 1, 2}).
  # i is the dominant axis; (j, k) are the other two axes in cyclic order.
  ind = torch.nonzero(choices != 3, as_tuple=True)[0]
  i = choices[ind]
  j = (i + 1) % 3
  k = (j + 1) % 3

  quat[ind, i] = 1 - decision_matrix[ind, -1] + 2 * matrix[ind, i, i]
  quat[ind, j] = matrix[ind, j, i] + matrix[ind, i, j]
  quat[ind, k] = matrix[ind, k, i] + matrix[ind, i, k]
  quat[ind, 3] = matrix[ind, k, j] - matrix[ind, j, k]

  # Branch 2: the trace dominates (choices == 3). The scalar part comes
  # from the trace; the vector part from antisymmetric off-diagonal
  # differences.
  ind = torch.nonzero(choices == 3, as_tuple=True)[0]
  quat[ind, 0] = matrix[ind, 2, 1] - matrix[ind, 1, 2]
  quat[ind, 1] = matrix[ind, 0, 2] - matrix[ind, 2, 0]
  quat[ind, 2] = matrix[ind, 1, 0] - matrix[ind, 0, 1]
  quat[ind, 3] = 1 + decision_matrix[ind, -1]

  # Both branches yield the quaternion up to a common scale; one final
  # normalization makes it unit length.
  return quat / torch.norm(quat, dim=1, keepdim=True)

def build_rotation(q: torch.Tensor) -> torch.Tensor:
  """Convert quaternions to rotation matrices.

  Args:
      q: (N, 4) quaternions in scalar-first (w, x, y, z) order; they are
         normalized internally, so unit length is not required.

  Returns:
      (N, 3, 3) rotation matrices on the same device and dtype as ``q``.
  """
  norm = q.norm(dim=-1, keepdim=True)
  q = q / norm

  # Allocate on the input's device instead of the previous hard-coded
  # 'cuda', which crashed for CPU tensors and ignored multi-GPU placement.
  R = torch.zeros((q.size(0), 3, 3), dtype=q.dtype, device=q.device)

  r = q[..., 0]
  x = q[..., 1]
  y = q[..., 2]
  z = q[..., 3]

  R[..., 0, 0] = 1 - 2 * (y*y + z*z)
  R[..., 0, 1] = 2 * (x*y - r*z)
  R[..., 0, 2] = 2 * (x*z + r*y)
  R[..., 1, 0] = 2 * (x*y + r*z)
  R[..., 1, 1] = 1 - 2 * (x*x + z*z)
  R[..., 1, 2] = 2 * (y*z - r*x)
  R[..., 2, 0] = 2 * (x*z - r*y)
  R[..., 2, 1] = 2 * (y*z + r*x)
  R[..., 2, 2] = 1 - 2 * (x*x + y*y)
  return R

def build_scaling_rotation(s, r):
  """Build per-element factors L = R @ diag(s).

  Args:
      s: (N, 3) scale factors.
      r: (N, 4) quaternions in (w, x, y, z) order, converted via
         build_rotation.

  Returns:
      (N, 3, 3) matrices R @ diag(s) on the device of ``s``.
  """
  # Allocate on the input's device rather than the previous hard-coded
  # "cuda" so CPU tensors work as well.
  L = torch.zeros((s.shape[0], 3, 3), dtype=torch.float, device=s.device)
  R = build_rotation(r)

  L[:, 0, 0] = s[:, 0]
  L[:, 1, 1] = s[:, 1]
  L[:, 2, 2] = s[:, 2]

  return R @ L


def build_scaling_rotation_2d(s, r):
  """Build per-element 2-D factors L = R[:2, :2] @ diag(s).

  Args:
      s: (N, 2) scale factors.
      r: (N, 4) quaternions in (w, x, y, z) order; only the top-left
         2x2 block of the resulting 3-D rotation is used.

  Returns:
      (N, 2, 2) matrices on the device of ``s``.
  """
  # Allocate on the input's device rather than the previous hard-coded
  # "cuda" so CPU tensors work as well.
  L = torch.zeros((s.shape[0], 2, 2), dtype=torch.float, device=s.device)
  R = build_rotation(r)[:, :2, :2]

  L[:, 0, 0] = s[:, 0]
  L[:, 1, 1] = s[:, 1]

  return R @ L

# %%
def batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):
    """Calculates the rotation matrices for a batch of rotation vectors.

    Parameters
    ----------
    rot_vecs: torch.tensor Nx3
        array of N axis-angle vectors
    epsilon: float
        offset added to the vectors before taking the norm, guarding the
        division for near-zero rotations
    dtype: torch.dtype
        dtype of the intermediate skew-symmetric matrices

    Returns
    -------
    R: torch.tensor Nx3x3
        The rotation matrices for the given axis-angle parameters
    """

    batch_size = rot_vecs.shape[0]
    device = rot_vecs.device

    # Use the epsilon parameter (previously it was ignored in favor of a
    # hard-coded 1e-8 literal).
    angle = torch.norm(rot_vecs + epsilon, dim=1, keepdim=True)
    rot_dir = rot_vecs / angle

    # (N, 1, 1) so they broadcast against the (N, 3, 3) matrices below.
    cos = torch.unsqueeze(torch.cos(angle), dim=1)
    sin = torch.unsqueeze(torch.sin(angle), dim=1)

    # Build the skew-symmetric cross-product matrix K for each axis.
    # (A dead `K = torch.zeros(...)` allocation was removed here.)
    rx, ry, rz = torch.split(rot_dir, 1, dim=1)
    zeros = torch.zeros((batch_size, 1), dtype=dtype, device=device)
    K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1).view(
        (batch_size, 3, 3)
    )

    # Rodrigues' formula: R = I + sin(theta) K + (1 - cos(theta)) K^2.
    ident = torch.eye(3, dtype=dtype, device=device).unsqueeze(dim=0)
    rot_mat = ident + sin * K + (1 - cos) * torch.bmm(K, K)
    return rot_mat