import numpy as np
import collections
import struct
from steps_params import args
from scene.dataset_readers import qvec2rotmat
import sys, os

# Record types mirroring COLMAP's sparse-model layout.
CameraModel = collections.namedtuple(
    "CameraModel", ["model_id", "model_name", "num_params"])
Camera = collections.namedtuple(
    "Camera", ["id", "model", "width", "height", "params"])
BaseImage = collections.namedtuple(
    "Image", ["id", "qvec", "tvec", "camera_id", "name", "xys", "point3D_ids"])
Point3D = collections.namedtuple(
    "Point3D", ["id", "xyz", "rgb", "error", "image_ids", "point2D_idxs"])

# Every camera model COLMAP can emit, with the number of intrinsic
# parameters each one stores in cameras.bin.
CAMERA_MODELS = {
    CameraModel(0, "SIMPLE_PINHOLE", 3),
    CameraModel(1, "PINHOLE", 4),
    CameraModel(2, "SIMPLE_RADIAL", 4),
    CameraModel(3, "RADIAL", 5),
    CameraModel(4, "OPENCV", 8),
    CameraModel(5, "OPENCV_FISHEYE", 8),
    CameraModel(6, "FULL_OPENCV", 12),
    CameraModel(7, "FOV", 5),
    CameraModel(8, "SIMPLE_RADIAL_FISHEYE", 4),
    CameraModel(9, "RADIAL_FISHEYE", 5),
    CameraModel(10, "THIN_PRISM_FISHEYE", 12),
}
# Lookup tables keyed by numeric id and by name.
CAMERA_MODEL_IDS = {m.model_id: m for m in CAMERA_MODELS}
CAMERA_MODEL_NAMES = {m.model_name: m for m in CAMERA_MODELS}

def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"):
    """Read ``num_bytes`` from a binary file and unpack them in one call.

    :param fid: binary file object positioned at the data to read.
    :param num_bytes: total byte count; must match the struct format size,
        e.g. 2, 6, 16, 30, etc.
    :param format_char_sequence: struct format characters drawn from
        {c, e, f, d, h, H, i, I, l, L, q, Q}.
    :param endian_character: byte-order prefix, any of {@, =, <, >, !}.
    :return: tuple of the unpacked values.
    """
    raw = fid.read(num_bytes)
    return struct.unpack(endian_character + format_char_sequence, raw)

class ImageInfo(BaseImage):
    """BaseImage record extended with a quaternion-to-rotation helper."""

    def qvec2rotmat(self):
        """Return the 3x3 rotation matrix for this image's quaternion.

        Delegates to scene.dataset_readers.qvec2rotmat (the standard
        COLMAP w-x-y-z quaternion conversion).
        """
        # The previously commented-out inline formula was dead code and
        # duplicated the imported implementation; removed.
        return qvec2rotmat(self.qvec)

# All COLMAP reconstruction outputs live under <source_path>/sparse/0.
_sparse_dir = os.path.join(args.source_path, "sparse/0")
cameras_intrinsic_file = os.path.join(_sparse_dir, "cameras.bin")
cameras_extrinsic_file = os.path.join(_sparse_dir, "images.bin")
path_to_model_file = os.path.join(_sparse_dir, "points3D.bin")

## read_intrinsic_binary
# Parse COLMAP's cameras.bin: a uint64 camera count, then per camera a
# fixed (camera_id, model_id, width, height) record followed by a
# model-dependent number of float64 intrinsic parameters.
cameras = {}
with open(cameras_intrinsic_file, "rb") as fid:
    num_cameras = read_next_bytes(fid, 8, "Q")[0]
    for _ in range(num_cameras):
        camera_properties = read_next_bytes(
            fid, num_bytes=24, format_char_sequence="iiQQ")
        camera_id = camera_properties[0]
        model_id = camera_properties[1]
        # Look the model up once instead of indexing CAMERA_MODEL_IDS twice.
        camera_model = CAMERA_MODEL_IDS[model_id]
        width = camera_properties[2]
        height = camera_properties[3]
        num_params = camera_model.num_params
        params = read_next_bytes(fid, num_bytes=8*num_params,
                                 format_char_sequence="d"*num_params)
        cameras[camera_id] = Camera(id=camera_id,
                                    model=camera_model.model_name,
                                    width=width,
                                    height=height,
                                    params=np.array(params))
    # Sanity check: camera ids in the file must be unique.
    assert len(cameras) == num_cameras

## read_extrinsic_binary
# Parse COLMAP's images.bin: per registered image a fixed pose/id record,
# a NUL-terminated file name, then the list of 2D feature observations.
images = {}
with open(cameras_extrinsic_file, "rb") as fid:
    num_reg_images = read_next_bytes(fid, 8, "Q")[0]
    for _ in range(num_reg_images):
        binary_image_properties = read_next_bytes(
            fid, num_bytes=64, format_char_sequence="idddddddi")
        image_id = binary_image_properties[0]
        qvec = np.array(binary_image_properties[1:5])  # rotation quaternion (w, x, y, z)
        tvec = np.array(binary_image_properties[5:8])  # translation vector
        camera_id = binary_image_properties[8]
        # Accumulate raw bytes and decode once at the end instead of
        # growing a str one character at a time (avoids quadratic
        # concatenation and per-byte decoding, which would break on
        # multi-byte UTF-8 file names).
        name_bytes = bytearray()
        current_char = read_next_bytes(fid, 1, "c")[0]
        while current_char != b"\x00":   # name is terminated by ASCII NUL
            name_bytes += current_char
            current_char = read_next_bytes(fid, 1, "c")[0]
        image_name = name_bytes.decode("utf-8")
        num_points2D = read_next_bytes(fid, num_bytes=8,
                                       format_char_sequence="Q")[0]
        x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D,
                                   format_char_sequence="ddq"*num_points2D)
        xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),
                               tuple(map(float, x_y_id_s[1::3]))])
        point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))
        # xys are this image's 2D feature points; point3D_ids maps each
        # feature to its 3D point id, or -1 when the feature was never
        # triangulated (no other image observed it).
        images[image_id] = ImageInfo(
            id=image_id, qvec=qvec, tvec=tvec,
            camera_id=camera_id, name=image_name,
            xys=xys, point3D_ids=point3D_ids)
        
## read_point3d_binary
# Parse COLMAP's points3D.bin into dense arrays of positions, colors and
# reprojection errors; each point's observation track is read only to
# advance the file cursor and is otherwise discarded.
with open(path_to_model_file, "rb") as fid:
    num_points = read_next_bytes(fid, 8, "Q")[0]

    xyzs = np.empty((num_points, 3))
    rgbs = np.empty((num_points, 3))
    errors = np.empty((num_points, 1))

    for p_id in range(num_points):
        # Fixed-size record: id (Q), xyz (3d), rgb (3B), error (d).
        props = read_next_bytes(
            fid, num_bytes=43, format_char_sequence="QdddBBBd")
        xyzs[p_id] = props[1:4]
        rgbs[p_id] = props[4:7]
        errors[p_id] = props[7]
        track_length = read_next_bytes(
            fid, num_bytes=8, format_char_sequence="Q")[0]
        # Consume the (image_id, point2D_idx) pairs so the next record
        # starts at the right offset.
        read_next_bytes(
            fid, num_bytes=8*track_length,
            format_char_sequence="ii"*track_length)
        
## 3D Point Cloud Visualization
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

def plot_3d_point_cloud(xyzs, rgbs):
    """Scatter-plot an (N, 3) point cloud colored by per-point RGB values.

    :param xyzs: (N, 3) array of point positions.
    :param rgbs: (N, 3) array of colors in [0, 1].
    """
    fig = plt.figure(figsize=(6, 6))
    ax = fig.add_subplot(111, projection='3d')
    # s controls the marker size.
    ax.scatter(xyzs[:, 0], xyzs[:, 1], xyzs[:, 2], c=rgbs, s=1)
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')
    plt.title('3D Point Cloud Visualization')
    plt.show()

# plot_3d_point_cloud(xyzs, rgbs/255.0)

## PLY File Format
# The original pipeline stores the COLMAP .bin points in a .ply file;
# load that file back and split its vertex attributes into arrays.
from plyfile import PlyData, PlyElement
ply_path = os.path.join(args.source_path, "sparse/0", "points3D.ply")
plydata = PlyData.read(ply_path)
vertices = plydata['vertex']
positions = np.column_stack([vertices['x'], vertices['y'], vertices['z']])
colors = np.column_stack([vertices['red'], vertices['green'], vertices['blue']]) / 255.0
normals = np.column_stack([vertices['nx'], vertices['ny'], vertices['nz']])

# plot_3d_point_cloud(positions, colors)

##
from scene.dataset_readers import readColmapCameras, getNerfppNorm, focal2fov, CameraInfo, qvec2rotmat, getWorld2View2
from PIL import Image
reading_dir = "images"
llffhold = 8
cam_extrinsics, cam_intrinsics, images_folder = images, cameras, os.path.join(args.source_path, reading_dir)
cam_infos = []
# Build one CameraInfo per registered image: pose (R, T), field of view
# derived from the intrinsics, and the image pixels themselves.
# (Was `for idx, key in enumerate(...)`; idx was unused in the loop body.)
for key in cam_extrinsics:

    extr = cam_extrinsics[key]
    intr = cam_intrinsics[extr.camera_id]
    height = intr.height
    width = intr.width

    uid = intr.id
    # NOTE(review): R is the transpose of the quaternion's rotation matrix —
    # presumably converting COLMAP's stored rotation to the convention
    # expected by getWorld2View2 below; confirm against scene.dataset_readers.
    R = np.transpose(qvec2rotmat(extr.qvec))
    T = np.array(extr.tvec)

    if intr.model=="SIMPLE_PINHOLE":
        # A single focal length shared by both axes.
        focal_length_x = intr.params[0]
        FovY = focal2fov(focal_length_x, height)
        FovX = focal2fov(focal_length_x, width)
    elif intr.model=="PINHOLE":
        focal_length_x = intr.params[0]
        focal_length_y = intr.params[1]
        FovY = focal2fov(focal_length_y, height)
        FovX = focal2fov(focal_length_x, width)
    else:
        assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!"

    image_path = os.path.join(images_folder, os.path.basename(extr.name))
    # Name is everything before the FIRST dot (split, not splitext).
    image_name = os.path.basename(image_path).split(".")[0]
    image = Image.open(image_path)

    cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
                            image_path=image_path, image_name=image_name, width=width, height=height)
    cam_infos.append(cam_info)
# Hold out every llffhold-th view; the rest become the training set.
train_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold != 0]

## getNerfppNorm
def get_center_and_diag(cam_centers):
    """Return the mean of a list of (3, 1) camera centers (flattened to
    shape (3,)) and the largest distance from any center to that mean.
    """
    stacked = np.hstack(cam_centers)                    # (3, N)
    mean_center = stacked.mean(axis=1, keepdims=True)   # (3, 1)
    dists = np.linalg.norm(stacked - mean_center, axis=0, keepdims=True)
    return mean_center.flatten(), np.max(dists)

cam_centers = []

# A camera's world-space center is the translation column of the inverse
# of its world-to-view matrix.
for cam in cam_infos:
    world_to_view = getWorld2View2(cam.R, cam.T)
    cam_to_world = np.linalg.inv(world_to_view)
    cam_centers.append(cam_to_world[:3, 3:4])

center, diagonal = get_center_and_diag(cam_centers)
radius = diagonal * 1.1
translate = -center
# Center and radius of the whole set of training viewpoints.
nerf_normalization = {"translate": translate, "radius": radius}

## plot camera and direction in world coordinate
# Stack the (3, 1) camera centers into an (N, 3) array of positions.
centers = np.concatenate(cam_centers, axis=1).T
def plot_3d_point_direction(points, directions, arrow_length=0.5):
    """Plot 3D points with an arrow showing each point's direction.

    :param points: (N, 3) array of positions.
    :param directions: (N, 3) array of direction vectors (need not be unit).
    :param arrow_length: length of each drawn arrow.
    """
    fig = plt.figure(figsize=(6, 6))
    ax = fig.add_subplot(111, projection='3d')
    # Positions as red markers.
    ax.scatter(points[:, 0], points[:, 1], points[:, 2], c='r', s=50)
    # One blue arrow per point, normalized to unit direction.
    for pt, vec in zip(points, directions):
        unit = vec / np.linalg.norm(vec)
        ax.quiver(pt[0], pt[1], pt[2],
                  unit[0], unit[1], unit[2],
                  length=arrow_length, normalize=True, color='b')
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')
    # Equalize the axis ranges around the data's midpoint so the cloud
    # is not visually stretched.
    lo = points.min(axis=0)
    hi = points.max(axis=0)
    half_range = (hi - lo).max() / 2.0
    mid = (hi + lo) * 0.5
    ax.set_xlim(mid[0] - half_range, mid[0] + half_range)
    ax.set_ylim(mid[1] - half_range, mid[1] + half_range)
    ax.set_zlim(mid[2] - half_range, mid[2] + half_range)
    plt.title('3D Points with Direction Arrows')
    plt.show()

# Viewing direction of each camera: the camera-frame -Z axis mapped into
# world coordinates via the transpose of the stored rotation.
directions = np.stack([cam.R.T @ np.array([0, 0, -1]) for cam in cam_infos], 0)
# plot_3d_point_direction(centers, directions)