import io
import gradio as gr
from os.path import join, exists
import re
from matplotlib import pyplot as plt
import trimesh
from utils.VTS_object import get_obj_info
from smplx import smplx
from utils.load_smplx_params import load_multiperson_smplx_params
import numpy as np
from utils.process_timestamps import txt_to_paried_frameids
from PIL import Image
import json
from utils.pyt3d_wrapper import Pyt3DWrapper


# Module-level cache shared between the "init" (greet) and "adjust" Gradio
# callbacks; all three are populated by greet() and read by adjust().
hho_mesh = None  # combined scene mesh (object + both persons) built by greet()
dataset_dir = None  # dataset root directory set by greet()
img = None  # RGB frame (PIL.Image) loaded by greet()


def project_mesh_to_camera(mesh, dataset_dir, camera, img, device="cuda:0"):
    """Render a world-space mesh from the given camera's viewpoint.

    Loads the per-camera calibration from disk, builds a Pyt3DWrapper
    renderer sized to the RGB frame, and returns the rendering.

    Parameters
    ----------
    mesh : trimesh.Trimesh
        Scene mesh in world coordinates.
    dataset_dir : str
        Dataset root containing "extrinsic" and "intrinsic" folders.
    camera : str
        Camera name used to locate the calibration files.
    img : PIL.Image.Image
        Frame whose size sets the render resolution.
    device : str
        Torch device string for the renderer.

    Returns
    -------
    numpy.ndarray
        uint8 rendering of the mesh, scaled to 0..255.
    """
    # camera2world.txt stores the camera->world transform; invert it to get
    # the world->camera extrinsic the renderer expects.
    cam2world = np.loadtxt(
        join(dataset_dir, "extrinsic", camera, "camera2world.txt"))
    world2cam = np.linalg.inv(cam2world)

    intrinsic_path = join(dataset_dir, "intrinsic", camera, "intrinsic.json")
    with open(intrinsic_path, "r") as f:
        calib = json.load(f)
    # The 3x3 matrix is stored flat; reshape, then transpose — the file
    # apparently stores it column-major (see the example dump in history).
    cam_matrix = np.array(calib['intrinsic_matrix']).reshape((3, 3)).T

    renderer = Pyt3DWrapper(image_size=img.size, use_fixed_cameras=True,
                            intrin=cam_matrix, extrin=world2cam, device=device)

    rendering = renderer.render_meshes([mesh])[0]
    return (rendering * 255).astype(np.uint8)


def draw_mesh_on_image(img, projected_mesh, alpha=0.5):
    """Rasterize a 2D-projected mesh over an image using matplotlib.

    Parameters
    ----------
    img : PIL.Image.Image
        Background image; its pixel size (divided by dpi=80) sets the
        figure size so the output roughly matches the input resolution.
    projected_mesh : trimesh.Trimesh
        Mesh whose vertices are already in image (pixel) coordinates;
        only the first two vertex columns are used by ax.fill.
    alpha : float
        Fill opacity for each face.

    Returns
    -------
    PIL.Image.Image
        The composited image, decoded from an in-memory PNG.
    """
    # figsize is in inches: pixels / dpi, with dpi fixed at 80.
    fig, ax = plt.subplots(figsize=(img.size[0]/80, img.size[1]/80), dpi=80)

    ax.imshow(img)

    # Fill every triangle in red. NOTE: one matplotlib patch per face —
    # fine for small meshes, slow for dense ones.
    for face in projected_mesh.faces:
        points = projected_mesh.vertices[face]
        ax.fill(points[:, 0], points[:, 1], 'r', alpha=alpha)

    ax.axis('off')

    # Render to an in-memory PNG and hand it back as a PIL image.
    buf = io.BytesIO()
    plt.savefig(buf, format='png', bbox_inches='tight', pad_inches=0)
    plt.close(fig)

    buf.seek(0)
    img = Image.open(buf)
    return img


def greet(camera, set, clip, device="cuda:0"):
    """Build the object + two-person SMPL-X scene mesh for one frame and
    overlay its camera projection on the matching RGB image.

    Side effects: caches the combined mesh, dataset root and RGB frame in
    the module globals (hho_mesh, dataset_dir, img) so that adjust() can
    re-render later with perturbed extrinsics.

    Args:
        camera: camera name, e.g. "azure2"; the trailing digit selects the
            column in the frame-pairing table.
        set: recording session name (note: shadows the builtin ``set``).
        clip: clip id within the session.
        device: torch device string for the SMPL-X model and renderer.

    Returns:
        PIL.Image.Image: RGBA image with the projected mesh alpha-blended
        over the RGB frame.

    Raises:
        gr.Error: if the clip folder has no "vis" subfolder.
    """
    global hho_mesh, dataset_dir, img
    # Hard-coded dataset roots for this demo deployment.
    dataset_dir = "/share/hhodataset"
    obj_dataset_dir = "/data3/datasets/HHO_object_dataset_final"

    data_path = join(dataset_dir, camera, set, clip)
    vts_path = join(dataset_dir, "VTS", set, clip)
    # if vis folder not in the data_path, raise error
    if not exists(join(data_path, "vis")):
        raise gr.Error("vis folder not in the data_path")

    # get X from camera azureX
    pattern = r"azure(\d)"
    regex = re.compile(pattern)

    # NOTE(review): raises IndexError if `camera` does not match "azure<digit>".
    camera_num = regex.findall(camera)[0]

    # Fixed frame index previewed by this demo; not user-selectable yet.
    vts_idx = 10

    _, obj_data_path = get_obj_info(vts_path, obj_dataset_dir)
    print(obj_data_path)
    origin_mesh = trimesh.load(obj_data_path)
    origin_vert, origin_face = origin_mesh.vertices, origin_mesh.faces
    # Per-frame object poses; only rows/cols [:3, :4] are used below, so
    # each entry is presumably a 4x4 (or 3x4) rigid transform.
    origin_obj_pose = np.load(
        join(vts_path, "aligned_objposes.npy"), allow_pickle=True)

    smplx_model = smplx.create("/share/human_model/models", model_type="smplx", gender="neutral", use_face_contour=False,
                               num_betas=10, num_expression_coeffs=10, ext="npz", use_pca=True, num_pca_comps=12, flat_hand_mean=True).to(device)
    # Load exactly one frame (vts_idx) of SMPL-X fitting parameters.
    multiperson_SMPLX_params = load_multiperson_smplx_params(join(
        vts_path, "SMPLX_fitting"), start_frame=vts_idx, end_frame=vts_idx+1, device=device)
    origin_p1_SMPLX_params = multiperson_SMPLX_params["person1"]
    origin_p2_SMPLX_params = multiperson_SMPLX_params["person2"]

    # Transform the object vertices into world space for frame vts_idx.
    origin_vert_seq = origin_vert @ origin_obj_pose[vts_idx][:3,
                                                             :3].T + origin_obj_pose[vts_idx][:3, 3]

    origin_mesh = trimesh.Trimesh(vertices=origin_vert_seq, faces=origin_face)

    # Take element 0 of the single-frame window and re-add the batch
    # dimension expected by the SMPL-X forward pass.
    ori_p1_beta, ori_p1_expression, ori_p1_body_pose, ori_p1_transl, ori_p1_global_orient, ori_p1_left_hand_pose, ori_p1_right_hand_pose = origin_p1_SMPLX_params["betas"][0].unsqueeze(0), origin_p1_SMPLX_params["expression"][0].unsqueeze(
        0), origin_p1_SMPLX_params["body_pose"][0].unsqueeze(0), origin_p1_SMPLX_params["transl"][0].unsqueeze(0), origin_p1_SMPLX_params["global_orient"][0].unsqueeze(0), origin_p1_SMPLX_params["left_hand_pose"][0].unsqueeze(0), origin_p1_SMPLX_params["right_hand_pose"][0].unsqueeze(0)

    ori_p2_beta, ori_p2_expression, ori_p2_body_pose, ori_p2_transl, ori_p2_global_orient, ori_p2_left_hand_pose, ori_p2_right_hand_pose = origin_p2_SMPLX_params["betas"][0].unsqueeze(0), origin_p2_SMPLX_params["expression"][0].unsqueeze(
        0), origin_p2_SMPLX_params["body_pose"][0].unsqueeze(0), origin_p2_SMPLX_params["transl"][0].unsqueeze(0), origin_p2_SMPLX_params["global_orient"][0].unsqueeze(0), origin_p2_SMPLX_params["left_hand_pose"][0].unsqueeze(0), origin_p2_SMPLX_params["right_hand_pose"][0].unsqueeze(0)
    ori_p1_model = smplx_model(betas=ori_p1_beta, expression=ori_p1_expression, body_pose=ori_p1_body_pose, transl=ori_p1_transl,
                               global_orient=ori_p1_global_orient, left_hand_pose=ori_p1_left_hand_pose, right_hand_pose=ori_p1_right_hand_pose, return_verts=True)
    ori_p2_model = smplx_model(betas=ori_p2_beta, expression=ori_p2_expression, body_pose=ori_p2_body_pose, transl=ori_p2_transl,
                               global_orient=ori_p2_global_orient, left_hand_pose=ori_p2_left_hand_pose, right_hand_pose=ori_p2_right_hand_pose, return_verts=True)

    # NOTE(review): the standard smplx forward output exposes .vertices but
    # usually NOT .faces (faces normally live on the model object as a numpy
    # array) — confirm this works with the wrapped `from smplx import smplx`.
    ori_p1_mesh = trimesh.Trimesh(vertices=ori_p1_model.vertices.detach().cpu().numpy()[
                                  0], faces=ori_p1_model.faces.detach().cpu().numpy())
    ori_p2_mesh = trimesh.Trimesh(vertices=ori_p2_model.vertices.detach().cpu().numpy()[
                                  0], faces=ori_p2_model.faces.detach().cpu().numpy())

    # One combined scene mesh; cached globally for adjust().
    hho_mesh = trimesh.util.concatenate([origin_mesh, ori_p1_mesh, ori_p2_mesh])

    # get rgb
    # Map the VTS frame index to this camera's RGB frame id (columns are
    # per-camera, hence camera_num - 1).
    paired_frames = txt_to_paried_frameids(
        join(vts_path, "aligned_frame_ids.txt"))
    rgb_idx = paired_frames[int(vts_idx)][int(camera_num)-1]

    rgb_path = join(data_path, "vis", "color",
                    "{}.jpg".format(str(rgb_idx).zfill(5)))

    # open image and return PIL.Image
    img = Image.open(rgb_path)
    project_img_np = project_mesh_to_camera(hho_mesh, dataset_dir, camera, img)
    project_img = Image.fromarray(project_img_np)
    # Overlay opacity.
    alpha = 0.9

    # Create a fully transparent layer with the same size as the RGB frame.
    overlay = Image.new('RGBA', img.size, (0, 0, 0, 0))

    # Set the rendering's opacity to alpha and paste it onto the overlay.
    image2_blend = project_img.copy().convert('RGBA')
    image2_blend.putalpha(int(255 * alpha))
    overlay.paste(image2_blend, (0, 0), mask=image2_blend)

    # Composite the overlay onto the original frame.
    result = Image.alpha_composite(img.convert('RGBA'), overlay)

    return result


def adjust(camera, trans_x, trans_y, trans_z, rot_x, rot_y, rot_z, device="cuda:0"):
    """Re-render the cached scene mesh after perturbing the camera extrinsics.

    Applies an extra rotation (degrees, applied in X @ Y @ Z order) and an
    extra translation on top of the world-to-camera extrinsic loaded from
    disk, then alpha-blends the rendering over the RGB frame cached by
    greet().

    Args:
        camera: camera name (e.g. "azure2"); selects the calibration files.
        trans_x, trans_y, trans_z: extra translation added to the
            extrinsic's translation column (same units as the calibration).
        rot_x, rot_y, rot_z: extra rotation around each axis, in degrees.
        device: torch device string for the renderer.

    Returns:
        PIL.Image.Image: the cached frame with the re-projected mesh
        alpha-blended on top.

    Raises:
        ValueError: if greet() has not been run yet (cached globals unset).
    """
    global hho_mesh, dataset_dir, img
    if dataset_dir is None or camera is None or img is None or hho_mesh is None:
        # Message matches the full set of preconditions actually checked.
        raise ValueError(
            "dataset_dir, camera, img and hho_mesh must not be None -- "
            "run the 'init' (greet) step first")
    # camera2world.txt stores camera->world; invert to get world->camera.
    extrinsic = np.loadtxt(
        join(dataset_dir, "extrinsic", camera, "camera2world.txt"))
    extrinsic = np.linalg.inv(extrinsic)
    with open(join(dataset_dir, "intrinsic", camera, "intrinsic.json"), "r") as f:
        intrinsic_data = json.load(f)
        intrinsic = np.array(
            intrinsic_data['intrinsic_matrix']).reshape((3, 3))
    # The file apparently stores the matrix column-major; transpose it.
    intrinsic = intrinsic.T

    # Convert the slider values from degrees to radians.
    rot_x, rot_y, rot_z = np.deg2rad([rot_x, rot_y, rot_z])

    # Build the per-axis rotation matrices and combine them (X @ Y @ Z).
    cx, sx = np.cos(rot_x), np.sin(rot_x)
    cy, sy = np.cos(rot_y), np.sin(rot_y)
    cz, sz = np.cos(rot_z), np.sin(rot_z)
    rotate_x = np.array([[1, 0, 0], [0, cx, -sx], [0, sx, cx]])
    rotate_y = np.array([[cy, 0, sy], [0, 1, 0], [-sy, 0, cy]])
    rotate_z = np.array([[cz, -sz, 0], [sz, cz, 0], [0, 0, 1]])
    rotate = rotate_x @ rotate_y @ rotate_z

    # Perturb the extrinsic: extra rotation pre-multiplies the rotation
    # part; extra translation is added to the translation column.
    extrinsic[:3, :3] = rotate @ extrinsic[:3, :3]
    extrinsic[:3, 3] = extrinsic[:3, 3] + [trans_x, trans_y, trans_z]

    wrapper = Pyt3DWrapper(image_size=img.size, use_fixed_cameras=True,
                           intrin=intrinsic, extrin=extrinsic, device=device)

    project_img_np = (wrapper.render_meshes([hho_mesh])[0]*255).astype(np.uint8)

    project_img = Image.fromarray(project_img_np)
    # Overlay opacity.
    alpha = 0.9

    # Create a fully transparent layer with the same size as the RGB frame.
    overlay = Image.new('RGBA', img.size, (0, 0, 0, 0))

    # Set the rendering's opacity to alpha and paste it onto the overlay.
    image2_blend = project_img.copy().convert('RGBA')
    image2_blend.putalpha(int(255 * alpha))
    overlay.paste(image2_blend, (0, 0), mask=image2_blend)

    # Composite the overlay onto the original frame.
    result = Image.alpha_composite(img.convert('RGBA'), overlay)

    return result


# Gradio UI: an "init" step that builds and projects the scene mesh,
# followed by sliders plus an "adjust" step for manual extrinsic refinement.
with gr.Blocks() as demo:
    camera = gr.Radio(
        choices=["azure2", "azure3", "azure4", "azure5"], label="camera", value="azure2")
    # NOTE(review): `set` shadows the builtin; harmless here since it is
    # only ever passed straight into greet().
    set = gr.Textbox(label="set", value="20231002")
    clip = gr.Textbox(label="clip", value="024")
    output = gr.Image(type="pil")
    greet_btn = gr.Button("init")
    greet_btn.click(fn=greet, inputs=[
                    camera, set, clip], outputs=output, api_name="greet")
    # Translation sliders (same units as the extrinsic translation column).
    trans_x = gr.Slider(minimum=-1, maximum=1, step=0.001,
                        value=0, label="X_translation")
    trans_y = gr.Slider(minimum=-1, maximum=1, step=0.001,
                        value=0, label="Y_translation")
    trans_z = gr.Slider(minimum=-1, maximum=1, step=0.001, value=0,
                        label="Z_translation")
    # Rotation sliders, in degrees (converted to radians inside adjust()).
    rot_x = gr.Slider(minimum=-10, maximum=10, step=0.01,
                      value=0, label="X_axis_rotation(degree)")
    rot_y = gr.Slider(minimum=-10, maximum=10, step=0.01,
                      value=0, label="Y_axis_rotation")
    rot_z = gr.Slider(minimum=-10, maximum=10, step=0.01,
                      value=0, label="Z_axis_rotation")
    output2 = gr.Image(type="pil")
    adjust_btn = gr.Button("adjust")
    adjust_btn.click(fn=adjust, inputs=[
                     camera, trans_x, trans_y, trans_z, rot_x, rot_y, rot_z], outputs=output2, api_name="adjust")

# demo = gr.Interface(fn={"greet": greet, "another_function": another_function}, inputs=[gr.Radio(
#     choices=["azure2", "azure3", "azure4", "azure5"], label="camera"),
#     "text",
#     "text"], outputs=gr.Image(type="pil"))
demo.launch(share=True)
