
import glob
import json
import os
from logging import Logger
from os.path import isdir, join

import cv2
import imageio
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pyexr
from PIL import Image

from daniel_tools.img_utils import *
from diffsynth.data.utils import colorize_depth_map


root = "/baai-cwm-vepfs/cwm/shaocong.xu/exp/DiffSynth-Studio/data/glassverse/glassverse_v0_filtered_examples"
# Pick one sequence directory at random to visualize.
seq_names = [x for x in os.listdir(root) if isdir(join(root, x))]

np.random.shuffle(seq_names)
seq_name = seq_names[0]
# join() instead of f'/{root}/{seq_name}': root is already absolute, so the
# old f-string produced a double leading slash.
src = join(root, seq_name)

save_root = f'logs/rendering_vis/licheng_final-final/{seq_name}/'

os.makedirs(save_root, exist_ok=True)

# All rendered RGB frames of the chosen sequence, in frame order.
rgbs = sorted(glob.glob(os.path.join(src, '*.png')))

srgb_imgs = []

# Convert each frame to a float array and apply inverse-gamma encoding.
# NOTE(review): despite the original (Chinese) comment claiming "sRGB to
# linear", x ** (1/2.2) is the linear -> sRGB/display direction.
GAMMA = 2.2
for rgb_path in rgbs:
    frame = np.array(Image.open(rgb_path), dtype=np.float32) / 255.0
    frame = np.power(frame, 1.0 / GAMMA)
    frame_u8 = (frame * 255.0).astype(np.uint8)

    # Drop the alpha channel only when one is present; the original sliced
    # [:, :, :-1] unconditionally, which would discard the blue channel of a
    # 3-channel PNG. Then flip RGB -> BGR for the cv2-based video writer.
    if frame_u8.shape[-1] == 4:
        frame_u8 = frame_u8[:, :, :3]
    srgb_imgs.append(frame_u8[:, :, ::-1])

imgs2video2(srgb_imgs, fps=25, out_path=join(save_root, 'rendering-rgb.mp4'))


def blender_world_normal_2_opengl_camera(normals_world: np.ndarray, c2w: np.ndarray) -> np.ndarray:
    """Rotate per-pixel world-space normals into the camera frame.

    Args:
        normals_world: (H, W, 3) or (H, W, 4) normal map; a 4th channel
            (alpha) is discarded before the transform.
        c2w: (4, 4) camera-to-world matrix; only its 3x3 rotation block is
            used (translation does not affect normals).

    Returns:
        (H, W, 3) normals expressed in camera coordinates.
    """
    H, W, C = normals_world.shape
    if C == 4:
        normals_world = normals_world[..., :3]

    # world -> camera is the transpose of the c2w rotation, and
    # (R^T @ n)^T == n @ R, so one right-multiply replaces the
    # reshape/transpose/matmul/transpose pipeline.
    R_c2w = c2w[:3, :3]
    return (normals_world.reshape(-1, 3) @ R_c2w).reshape(H, W, 3)


with open(os.path.join(src, 'transforms.json'), 'r') as f:
    data = json.load(f)

normal_files = sorted(glob.glob(os.path.join(src, 'normal', '*.exr')))

camera_normals = []
for exr_path, frame_meta in zip(normal_files, data['frames']):
    # * RGBA: the last channel is alpha (a mask / opacity value).
    world_normal = pyexr.open(exr_path).get()

    c2w = np.array(frame_meta['transform_matrix']).reshape(4, 4)
    cam_normal = blender_world_normal_2_opengl_camera(world_normal, c2w)

    # Map [-1, 1] normal components into [0, 1] for display.
    cam_normal = (cam_normal + 1) / 2

    # Scale to uint8 and flip RGB -> BGR for the cv2-based video writer.
    camera_normals.append((cam_normal * 255).astype(np.uint8)[:, :, ::-1])

imgs2video2(camera_normals, fps=25, out_path=join(save_root, 'normal.mp4'))


def vis_mask(invalid_mask):
    """Render a 0/1 mask as a 'hot'-colormapped PIL image."""
    cmap = matplotlib.colormaps['hot']
    rgba = cmap(invalid_mask.astype(np.float32), bytes=False)
    rgb_u8 = (rgba[:, :, 0:3] * 255).astype(np.uint8)
    return Image.fromarray(rgb_u8)


depth_file = sorted(glob.glob(os.path.join(src, 'depth', '*.exr')))

depth_list = []
mask_list = []
DEPTH_EPS = 1e-2

debug_list = []

min_depth = float('inf')
max_depth = float('-inf')

for depth_path in depth_file:
    raw_depth = pyexr.open(depth_path).get()

    # Average the non-alpha channels down to a single-channel depth map.
    raw_depth = raw_depth[:, :, :-1].mean(-1)

    # Depths below DEPTH_EPS or beyond 70 are treated as invalid and zeroed.
    invalid_mask = np.logical_or(raw_depth < DEPTH_EPS, raw_depth > 70.0)
    raw_depth[invalid_mask] = 0

    # Track the global depth range across all frames (for debugging).
    min_depth = min(min_depth, raw_depth.min())
    max_depth = max(max_depth, raw_depth.max())

    # Colorized depth and mask, flipped RGB -> BGR for the cv2-based writer.
    depth_list.append(np.array(colorize_depth_map(raw_depth))[:, :, ::-1])

    debug_list.extend(np.unique(invalid_mask).tolist())

    mask_list.append(np.array(vis_mask(invalid_mask))[:, :, ::-1])

imgs2video2(depth_list, fps=25, out_path=os.path.join(save_root, 'depth.mp4'))
imgs2video2(mask_list, fps=25, out_path=os.path.join(save_root, 'mask.mp4'))

# Dead-by-default debugging path: flip to True to interactively inspect the
# invalid-depth mask.
DEBUG = False

if DEBUG:
    # NOTE(review): `invalid_mask` leaks out of the depth loop above, so this
    # only visualizes the mask of the *last* processed depth frame — confirm
    # that is intended.
    # plt.imshow(invalid_mask )
    # Find the pixel positions where invalid_mask is 1.
    invalid_indices = np.argwhere(invalid_mask == 1)

    # Scatter-plot those pixels so sparse invalid points stay visible.
    plt.figure(figsize=(6, 6))
    plt.imshow(invalid_mask, cmap='gray')
    if len(invalid_indices) > 0:
        y, x = invalid_indices[:, 0], invalid_indices[:, 1]
        plt.scatter(x, y, color='red', s=20, marker='o', label='invalid')
    plt.title('Invalid Mask (highlighted)')
    plt.legend()
    plt.show()

mp4_files = sorted(glob.glob(os.path.join(save_root, '*mp4')))

# Decode every mp4 under save_root into an in-memory list of frames.
frame_lists = []
for mp4_path in mp4_files:
    cap = cv2.VideoCapture(mp4_path)
    frames = []
    ok, frame = cap.read()
    while ok:
        frames.append(frame)
        ok, frame = cap.read()
    cap.release()
    frame_lists.append(frames)

# Truncate to the shortest clip so every video contributes the same number
# of frames.
min_len = min(len(frames) for frames in frame_lists)

# Horizontally concatenate the i-th frame of every clip.
concat_frames = [
    cv2.hconcat([clip[i] for clip in frame_lists])
    for i in range(min_len)
]

# Write the side-by-side comparison out as an mp4.
h, w, _ = concat_frames[0].shape
out_path = os.path.join(save_root, 'concat_vis.mp4')
writer = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*'mp4v'), 25, (w, h))

for frame in concat_frames:
    writer.write(frame)
writer.release()

# Also save a GIF. imageio expects RGB uint8, while OpenCV frames are BGR,
# so convert each frame first.
concat_frames_rgb = [cv2.cvtColor(f, cv2.COLOR_BGR2RGB) for f in concat_frames]
gif_out_path = os.path.join(save_root, 'concat_vis.gif')
imageio.mimsave(gif_out_path, concat_frames_rgb, fps=25)

# Visualize the 'Spectral' colormap as a horizontal gradient strip, as a
# legend for the colorized depth videos.
cmap = matplotlib.colormaps['Spectral']
gradient = np.linspace(0, 1, 256).reshape(1, -1)
plt.figure(figsize=(6, 1))
plt.imshow(gradient, aspect='auto', cmap=cmap)
plt.title('Spectral colormap')

# Label the displayed value range.
plt.xlabel('Value')
plt.xticks([0, 255], ['0.0', '1.0'])
plt.yticks([])

# plt.savefig(join(save_root,'Spectral.png'))
plt.show()

plt.close()
