import math
import os
from argparse import ArgumentParser

import numpy as np
import torch
import torchvision

from arguments import PipelineParams
from gaussian_renderer import render
from m_virtual_camera import VirtualCamera
from scene.gaussian_model import GaussianModel


def rotate_by_euler_angles(x: float, y: float, z: float):
    """Build a 3x3 float32 rotation matrix from Euler angles (radians).

    Returns Rx(x) @ Ry(y) @ Rz(z); applied to a column vector this rotates
    about z first, then y, then x ("z-y-x order").
    """
    # Fast path: all-zero angles need no trig at all.
    if not (x or y or z):
        return np.eye(3, dtype=np.float32)

    combined = rx(x) @ ry(y) @ rz(z)
    return np.asarray(combined, dtype=np.float32)


def rx(theta):
    """Return the 3x3 rotation matrix about the x-axis by `theta` radians.

    Uses `np.array` instead of the deprecated `np.matrix`; for 2-D operands
    the `@` operator behaves identically, so callers are unaffected.
    """
    c, s = np.cos(theta), np.sin(theta)
    return np.array(
        [
            [1, 0, 0],
            [0, c, -s],
            [0, s, c],
        ]
    )


def ry(theta):
    """Return the 3x3 rotation matrix about the y-axis by `theta` radians.

    Uses `np.array` instead of the deprecated `np.matrix`; for 2-D operands
    the `@` operator behaves identically, so callers are unaffected.
    """
    c, s = np.cos(theta), np.sin(theta)
    return np.array(
        [
            [c, 0, s],
            [0, 1, 0],
            [-s, 0, c],
        ]
    )


def rz(theta):
    """Return the 3x3 rotation matrix about the z-axis by `theta` radians.

    Uses `np.array` instead of the deprecated `np.matrix`; for 2-D operands
    the `@` operator behaves identically, so callers are unaffected.
    """
    c, s = np.cos(theta), np.sin(theta)
    return np.array(
        [
            [c, -s, 0],
            [s, c, 0],
            [0, 0, 1],
        ]
    )


def rotmat2qvec(R):
    """Convert a 3x3 rotation matrix to a unit quaternion (w, x, y, z).

    Eigendecomposition-based extraction (as used by COLMAP): the quaternion
    is the eigenvector of the symmetric 4x4 matrix K belonging to its largest
    eigenvalue. The sign is fixed so that w >= 0. Inverse of `qvec2rotmat`.
    """
    rxx, ryx, rzx, rxy, ryy, rzy, rxz, ryz, rzz = np.asarray(R).ravel()

    # Full symmetric K; eigh reads the lower triangle, which matches the
    # original lower-triangular construction entry for entry.
    K = np.array(
        [
            [rxx - ryy - rzz, ryx + rxy, rzx + rxz, ryz - rzy],
            [ryx + rxy, ryy - rxx - rzz, rzy + ryz, rzx - rxz],
            [rzx + rxz, rzy + ryz, rzz - rxx - ryy, rxy - ryx],
            [ryz - rzy, rzx - rxz, rxy - ryx, rxx + ryy + rzz],
        ],
        dtype=np.float32,
    ) / 3.0

    eigvals, eigvecs = np.linalg.eigh(K)
    dominant = eigvecs[:, np.argmax(eigvals)]
    # Reorder from (x, y, z, w) to (w, x, y, z).
    qvec = dominant[[3, 0, 1, 2]]
    if qvec[0] < 0:
        qvec = -qvec
    return qvec.astype(np.float32)


def qvec2rotmat(qvec):
    """Convert a unit quaternion (w, x, y, z) to a 3x3 float32 rotation matrix."""
    w, x, y, z = qvec[0], qvec[1], qvec[2], qvec[3]
    return np.array(
        [
            [1 - 2 * (y * y + z * z), 2 * (x * y - w * z), 2 * (x * z + w * y)],
            [2 * (x * y + w * z), 1 - 2 * (x * x + z * z), 2 * (y * z - w * x)],
            [2 * (x * z - w * y), 2 * (y * z + w * x), 1 - 2 * (x * x + y * y)],
        ]
    ).astype(np.float32)


def quat_multiply(quaternion0, quaternion1):
    """Hamilton product quaternion1 * quaternion0, (w, x, y, z) convention.

    Both inputs may carry leading batch dimensions; the last axis (length 4)
    holds the components. Applied as a rotation, the result rotates by
    `quaternion0` first, then `quaternion1`.
    """
    w0, x0, y0, z0 = np.split(quaternion0, 4, axis=-1)
    w1, x1, y1, z1 = np.split(quaternion1, 4, axis=-1)

    out_w = w1 * w0 - x1 * x0 - y1 * y0 - z1 * z0
    out_x = w1 * x0 + x1 * w0 + y1 * z0 - z1 * y0
    out_y = w1 * y0 - x1 * z0 + y1 * w0 + z1 * x0
    out_z = w1 * z0 + x1 * y0 - y1 * x0 + z1 * w0

    return np.concatenate((out_w, out_x, out_y, out_z), axis=-1)


def test_transform(
    angles_x=0,
    angles_y=0,
    angles_z=0,
    translate_x=0,
    translate_y=0,
    translate_z=0,
    g_model_scale=1.0,
    fov=50,
    image_width=512,
    image_height=512,
    distance=1.1,
    theta=270,
    phi=0,
):
    """Render ./models/test.ply after applying a similarity transform.

    Scales the Gaussian model by `g_model_scale`, rotates it by the Euler
    angles (degrees), and translates it — all about its own centroid — then
    renders it with a virtual camera and writes the image to ./debug/img.png.
    Requires a CUDA device.

    Args:
        angles_x, angles_y, angles_z: Euler angles in degrees (z-y-x order).
        translate_x, translate_y, translate_z: translation in world units.
        g_model_scale: uniform scale applied to positions and covariances.
        fov: vertical field of view in degrees.
        image_width, image_height: output resolution in pixels.
        distance, theta, phi: spherical camera placement relative to origin.
    """
    # Degrees -> radians for the rotation helpers.
    angles_x = math.radians(angles_x)
    angles_y = math.radians(angles_y)
    angles_z = math.radians(angles_z)

    # Pipeline settings come from the project's default argument set.
    parser = ArgumentParser(description="gaussian splatting")
    pipe = PipelineParams(parser)

    gaussians = GaussianModel(3)
    gaussians.load_ply("./models/test.ply")

    cam: VirtualCamera = VirtualCamera(
        image_width=image_width, image_height=image_height, FoVy=math.radians(fov)
    )
    cam.set_theta_phi_distance(theta, phi, distance)

    # Green background so the rendered silhouette is easy to inspect.
    bg = torch.tensor([0, 1, 0], dtype=torch.float32, device="cuda")
    # NOTE(review): hard-coded offset applied before the transform — looks
    # like a leftover debug shift; confirm it is intentional.
    gaussians._xyz = gaussians._xyz + torch.tensor([0.5, 0.5, 0.0]).cuda()

    ############################################################################################
    g_model_rotation_matrix = rotate_by_euler_angles(angles_x, angles_y, angles_z)
    g_model_translate = torch.tensor(
        [translate_x, translate_y, translate_z], dtype=torch.float32, device="cuda"
    )

    ##  -------------------------- handle xyz ---------------------------------
    # Preprocess: move the centroid to the origin so scale/rotation act about
    # the object's own center rather than the world origin.
    object_center = torch.mean(gaussians._xyz, dim=0).cuda()
    gaussians._xyz = gaussians._xyz - object_center

    # First scale.
    gaussians._xyz = gaussians._xyz * g_model_scale
    # Then rotate (row vectors, hence multiplication by R^T).
    gaussians._xyz = gaussians._xyz @ torch.from_numpy(g_model_rotation_matrix).cuda().T
    # Finally translate.
    gaussians._xyz = gaussians._xyz + g_model_translate

    # Postprocess: move the centroid back.
    gaussians._xyz = gaussians._xyz + object_center

    ##  -------------------------- handle rot ---------------------------------
    # Compose the model rotation into each Gaussian's own orientation.
    quaternions = rotmat2qvec(g_model_rotation_matrix)[np.newaxis, ...]
    rotations_from_quats = quat_multiply(
        gaussians._rotation.cpu().detach().numpy(), quaternions
    )
    # Renormalize to unit quaternions to guard against numeric drift.
    rotations_from_quats = rotations_from_quats / np.linalg.norm(
        rotations_from_quats, axis=-1, keepdims=True
    )
    gaussians._rotation = torch.from_numpy(rotations_from_quats).cuda()

    ##  -------------------------- handle scale ---------------------------------
    # Scale the covariance extents to match the scaled positions.
    gaussians._scaling = gaussians.scaling_inverse_activation(
        gaussians.get_scaling * g_model_scale
    )

    ############################################################################################
    with torch.no_grad():
        rendering = render(cam, gaussians, pipe, bg)["render"]
    # Bug fix: create the output directory (save_image fails otherwise) and
    # drop the placeholder-free f-string.
    os.makedirs("./debug", exist_ok=True)
    torchvision.utils.save_image(rendering, "./debug/img.png")
    
    


if __name__ == "__main__":
    
    # Smoke-test entry point: renders ./models/test.ply with default
    # parameters (requires CUDA and the project modules imported above).
    test_transform()
