import os
import json
import bpy
import mathutils
import numpy as np
import random as rnd
from math import radians
from scipy.spatial.transform import Rotation as R
# from mathutils import Vector

from types import SimpleNamespace

def ensure_dir(file_path):
    """Create the parent directory of `file_path` if it does not already exist.

    A bare filename with no directory component is a no-op (the original
    version crashed on `os.makedirs('')` in that case).
    """
    directory = os.path.dirname(file_path)
    # exist_ok=True avoids the check-then-create race of exists()+makedirs().
    if directory and not os.path.isdir(directory):
        os.makedirs(directory, exist_ok=True)
        print("created dir:", directory)

def parse_matrix(transfrom):
    """Split a 4x4 camera-to-world matrix into (location, xyz Euler angles).

    Accepts either a nested list or a numpy array; the Euler angles are
    returned in radians, 'xyz' order.
    """
    mat = np.asarray(transfrom) if isinstance(transfrom, list) else transfrom
    location = mat[:3, 3]
    euler_xyz = R.from_matrix(mat[:3, :3]).as_euler('xyz', degrees=False)
    return location, euler_xyz

def listify_matrix(matrix):
    """Convert a matrix (e.g. a mathutils Matrix) into a JSON-serializable
    list of row lists."""
    return [list(row) for row in matrix]

def getRndCameraPos(huge_dict):
    """Sample a uniformly random camera position inside the view cell.

    Draws one uniform value per axis (same RNG call order as before) and
    offsets the cell center by up to +/- half the cell size.
    """
    center = huge_dict.VIEW_CELL_CENTER
    size = huge_dict.VIEW_CELL_SIZE
    # Draw all three samples first so the rnd.random() call order is fixed.
    offsets = [rnd.random() - 0.5 for _ in range(3)]
    return tuple(center[axis] + offsets[axis] * size[axis] for axis in range(3))

def getRndCameraRot(huge_dict):
    """Sample a random camera Euler rotation, in radians.

    Each axis angle (degrees) is the start rotation jittered by up to
    +/- half of VIEW_ROT_RESTR, then converted to radians.
    """
    start = huge_dict.VIEW_ROT_START
    restriction = huge_dict.VIEW_ROT_RESTR
    angles = []
    for axis in range(3):
        jitter = (rnd.random() - 0.5) * restriction[axis]
        angles.append(radians(start[axis] + jitter))
    return angles[0], angles[1], angles[2]


def renderSet(huge_dict, fp, scene, subf, num_views, traj, depth_file_output, normal_file_output):
    """Render one view set (e.g. 'train') along a camera trajectory.

    For each frame i in [VIEWS_OFFSET, VIEWS_OFFSET + num_views) the camera is
    posed from traj[i] — either a full 4x4 'transform_matrix' or explicit
    x/y/z + eu_x/eu_y/eu_z entries — then the color image plus the compositor
    depth/normal outputs are rendered, and the resulting camera pose is
    collected into transforms_{subf}.json under `fp`.

    Set huge_dict.DEBUG to skip rendering, and huge_dict.SKIP_EXISTING_FILES
    to skip frames whose output files already exist.
    """
    out_data = {
        'camera_angle_x': bpy.data.objects[huge_dict.CAM_NAME].data.angle_x,
        'view_cell_center': huge_dict.VIEW_CELL_CENTER,
        'view_cell_size': huge_dict.VIEW_CELL_SIZE,
        'random_seed': huge_dict.SEED,
    }

    cam = scene.objects[huge_dict.CAM_NAME]
    out_data['camera_base_orientation'] = listify_matrix(cam.matrix_world)
    out_data['frames'] = []

    for i in range(huge_dict.VIEWS_OFFSET, huge_dict.VIEWS_OFFSET + num_views):
        print(f"Rendering {subf} file {i}")  # fixed stray '(' in the log message

        # All outputs for frame i share this basename.
        base_path = fp + f'/{subf}/' + f'{i:05d}'
        scene.render.filepath = base_path
        depth_file_output.file_slots[0].path = base_path + "_depth"
        normal_file_output.file_slots[0].path = base_path + "_normal"

        # Camera pose for this frame: 4x4 matrix or location + Euler angles.
        if "transform_matrix" in traj[i]:
            cam_loc, cam_rot = parse_matrix(traj[i]["transform_matrix"])
        else:
            cam_loc = (traj[i]["x"], traj[i]["y"], traj[i]["z"])
            cam_rot = (traj[i]["eu_x"], traj[i]["eu_y"], traj[i]["eu_z"])

        cam.location = cam_loc
        cam.rotation_euler[0] = cam_rot[0]
        cam.rotation_euler[1] = cam_rot[1]
        cam.rotation_euler[2] = cam_rot[2]

        skip_existing = False
        bpy.context.view_layer.update()

        # Optionally skip frames whose color/depth/normal files all exist.
        # The "0001" suffixes come from the file-output nodes' frame numbering.
        if huge_dict.SKIP_EXISTING_FILES:
            if (os.path.exists(base_path + '.png')
                    and os.path.exists(base_path + '_depth0001.exr')
                    and os.path.exists(base_path + '_normal0001.png')):
                print(f"Skipping existing file {scene.render.filepath}!")
                skip_existing = True

        if not huge_dict.DEBUG and not skip_existing:
            bpy.ops.render.render(write_still=True)

        bpy.context.view_layer.update()
        frame_data = {
            'file_path': f"./{subf}/{i:05d}",
            'rotation': 0,
            'transform_matrix': listify_matrix(cam.matrix_world)
        }
        out_data['frames'].append(frame_data)
        # Incremental crash-safe log. NOTE(review): appending one JSON object
        # per frame makes this file a concatenation, not valid JSON on its own.
        with open(fp + '/' + f'transforms_{subf}_save.json', 'a') as out_file:
            json.dump(frame_data, out_file, indent=4)

    if not huge_dict.DEBUG:
        with open(fp + '/' + f'transforms_{subf}.json', 'w') as out_file:
            json.dump(out_data, out_file, indent=4)
    else:
        print(out_data)


def create_set(huge_dict, traj, dnode, nnode):
    """Prepare the output dir, resolution and camera clip, then render the set.

    dnode / nnode are the compositor file-output nodes for the depth and
    normal passes, forwarded to renderSet().
    """
    # Output path is resolved relative to the currently open .blend file.
    fp = bpy.path.abspath(f"//{huge_dict.RESULTS_PATH}")

    ensure_dir(fp)

    scene = bpy.context.scene
    scene.render.resolution_x = huge_dict.RESOLUTION_X
    scene.render.resolution_y = huge_dict.RESOLUTION_Y
    scene.render.resolution_percentage = 100
    # NOTE(review): `cam` is unused here; renderSet re-fetches the camera itself.
    cam = scene.objects[huge_dict.CAM_NAME]

    # Avoid near-plane clipping of close geometry.
    # NOTE(review): bpy.data.cameras is keyed by the camera DATA-block name,
    # which only matches CAM_NAME if the object and its data share the name —
    # confirm for this .blend file.
    bpy.data.cameras[huge_dict.CAM_NAME].clip_start = 0.2

    # Only the split selected by huge_dict.MODE is rendered; the per-split
    # calls below were disabled in favor of one MODE-driven call.
    renderSet(huge_dict, fp,scene, huge_dict.MODE, huge_dict.VIEWS_TRAIN, traj, dnode, nnode)
    # renderSet(huge_dict, fp,scene,'test', huge_dict.VIEWS_TEST)
    # renderSet(huge_dict, fp,scene,'val', huge_dict.VIEWS_VAL)

def export_view_cells(SEED=42,
                      DEBUG=False,
                      VIEWS_CAM_PATH=40,
                      VIEWS=None,
                      VIEWS_TRAIN=None,
                      VIEWS_VAL=None,
                      VIEWS_TEST=None,
                      VIEWS_OFFSET=0,
                      RESOLUTION=800,
                      RESOLUTION_X=None,
                      RESOLUTION_Y=None,
                      COLOR_DEPTH=8,
                      FORMAT='PNG',
                      VIEW_CELL_CENTER=None,
                      VIEW_CELL_SIZE=None,
                      VIEW_ROT_START=None,
                      VIEW_ROT_RESTR=None,
                      SKIP_EXISTING_FILES=False,
                      CAM_NAME="renderCam",
                      RENDER_IMG_NAME="Viewer Node",
                      SCENE_NAME=None,
                      traj=None,
                      MODE="train"):
    """Render a camera trajectory as a NeRF-style dataset with depth/normals.

    Bundles all options into a SimpleNamespace, builds a compositor graph
    (render layers -> EXR depth output + normal output), renders every frame
    of `traj` via create_set(), and finally restores the camera to the first
    trajectory pose. Output goes to "<blend dir>/<SCENE_NAME>_<MODE>_<VIEWS>Views".

    SCENE_NAME and traj are required; returns early (with a message) if
    SCENE_NAME is missing.
    """
    if SCENE_NAME is None:
        print("Error: Please specify SCENE_NAME!")
        return

    # Single namespace threaded through all render helpers.
    huge_dict = SimpleNamespace()
    huge_dict.SEED = SEED

    rnd.seed(huge_dict.SEED)  # reproducible random view sampling

    huge_dict.DEBUG = DEBUG
    huge_dict.VIEWS_CAM_PATH = VIEWS_CAM_PATH
    huge_dict.VIEWS = VIEWS
    huge_dict.VIEWS_TRAIN = VIEWS_TRAIN
    huge_dict.VIEWS_VAL = VIEWS_VAL
    huge_dict.VIEWS_TEST = VIEWS_TEST
    huge_dict.MODE = MODE

    # Per-split view counts default to the global VIEWS count.
    if huge_dict.VIEWS_TRAIN is None:
        huge_dict.VIEWS_TRAIN = huge_dict.VIEWS
    if huge_dict.VIEWS_VAL is None:
        huge_dict.VIEWS_VAL = huge_dict.VIEWS
    if huge_dict.VIEWS_TEST is None:
        huge_dict.VIEWS_TEST = huge_dict.VIEWS

    huge_dict.VIEWS_OFFSET = VIEWS_OFFSET
    huge_dict.RESOLUTION = RESOLUTION

    # Square RESOLUTION is the default; explicit X and Y (both given) override.
    huge_dict.RESOLUTION_X = RESOLUTION
    huge_dict.RESOLUTION_Y = RESOLUTION
    if RESOLUTION_X is not None and RESOLUTION_Y is not None:
        huge_dict.RESOLUTION_X = RESOLUTION_X
        huge_dict.RESOLUTION_Y = RESOLUTION_Y

    huge_dict.COLOR_DEPTH = COLOR_DEPTH
    huge_dict.FORMAT = FORMAT
    huge_dict.VIEW_CELL_CENTER = VIEW_CELL_CENTER
    huge_dict.VIEW_CELL_SIZE = VIEW_CELL_SIZE
    huge_dict.VIEW_ROT_START = VIEW_ROT_START
    huge_dict.VIEW_ROT_RESTR = VIEW_ROT_RESTR
    huge_dict.SKIP_EXISTING_FILES = SKIP_EXISTING_FILES
    huge_dict.CAM_NAME = CAM_NAME
    huge_dict.RENDER_IMG_NAME = RENDER_IMG_NAME

    huge_dict.RESULTS_PATH = SCENE_NAME + '_' + MODE + "_" + str(VIEWS) + "Views"
    # NOTE: the depth pass is always written as OpenEXR regardless of the
    # FORMAT parameter (EXR keeps the full float depth range).
    huge_dict.FORMAT = 'OPEN_EXR'

    # Rebuild the compositor graph from scratch.
    bpy.data.scenes["Scene"].use_nodes = True
    tree = bpy.context.scene.node_tree
    nodes = tree.nodes
    links = tree.links
    # Iterate a copy: removing elements from the live collection while
    # iterating it is unsafe.
    for n in list(nodes):
        tree.nodes.remove(n)

    render_layers = tree.nodes.new('CompositorNodeRLayers')
    depth_file_output = tree.nodes.new(type="CompositorNodeOutputFile")
    depth_file_output.label = 'Depth Output'
    if huge_dict.FORMAT == "OPEN_EXR":
        depth_file_output.format.file_format = 'OPEN_EXR'
        links.new(render_layers.outputs['Depth'], depth_file_output.inputs[0])
        depth_file_output.base_path = bpy.path.abspath(f"//{huge_dict.RESULTS_PATH}")
    else:
        # Remap as other types can not represent the full range of depth.
        # (renamed from `map`, which shadowed the builtin)
        map_node = tree.nodes.new(type="CompositorNodeMapValue")
        # Size is chosen kind of arbitrarily, tune until the depth map looks right.
        map_node.offset = [-0.7]
        map_node.size = [1.4]  # DEPTH_SCALE
        map_node.use_min = True
        map_node.min = [0]
        links.new(render_layers.outputs['Depth'], map_node.inputs[0])
        links.new(map_node.outputs[0], depth_file_output.inputs[0])
    normal_file_output = tree.nodes.new(type="CompositorNodeOutputFile")
    normal_file_output.label = 'Normal Output'
    normal_file_output.base_path = bpy.path.abspath(f"//{huge_dict.RESULTS_PATH}")
    links.new(render_layers.outputs['Normal'], normal_file_output.inputs[0])

    bpy.context.scene.render.dither_intensity = 0.0
    bpy.context.scene.render.film_transparent = True

    # renderSet() writes absolute paths into the file slots, so the node
    # base paths are cleared (overriding the base_path set above).
    for output_node in [depth_file_output, normal_file_output]:
        output_node.base_path = ''

    # Enable the depth and normal passes. The view layer is usually named
    # "ViewLayer"; fall back to "RenderLayer" for scenes that use that name.
    try:
        bpy.context.scene.view_layers["ViewLayer"].use_pass_z = True
        bpy.context.scene.view_layers["ViewLayer"].use_pass_normal = True
    except KeyError:  # was a bare except; only a missing layer name is expected
        bpy.context.scene.view_layers["RenderLayer"].use_pass_z = True
        bpy.context.scene.view_layers["RenderLayer"].use_pass_normal = True

    bpy.context.scene.render.image_settings.file_format = 'PNG'
    bpy.context.scene.render.use_compositing = True  # enable compositing
    create_set(huge_dict, traj, depth_file_output, normal_file_output)

    # Reset the camera to the first trajectory pose.
    cam = bpy.context.scene.objects[huge_dict.CAM_NAME]
    if "transform_matrix" in traj[0]:
        init_loc, init_rot = parse_matrix(traj[0]["transform_matrix"])
    else:
        init_loc = (traj[0]["x"], traj[0]["y"], traj[0]["z"])
        init_rot = (traj[0]["eu_x"], traj[0]["eu_y"], traj[0]["eu_z"])

    cam.rotation_euler[0] = init_rot[0]
    cam.rotation_euler[1] = init_rot[1]
    cam.rotation_euler[2] = init_rot[2]
    cam.location = init_loc
    bpy.context.view_layer.update()


# main render file
if __name__ == "__main__":
    # Reminder: check the camera near clip and post-processing settings
    # before rendering.

    import json

    def load_camera_trajectory(path):
        """Load the 'frames' list from a NeRF-style transforms JSON file."""
        with open(path, 'r') as f:
            data = json.load(f)
        return data['frames']

    # BUGFIX: target_traj_paths and mode_list were commented out but still
    # used below, crashing with NameError. Restored from the intended config;
    # adjust these paths for your machine.
    dataset_root = "/data/yangchen/datasets_in_papers/DONeRF/barbershop"
    target_traj_paths = [os.path.join(dataset_root, f"transforms_{mode}.json")
                         for mode in ["train", "val", "test"]]
    target_traj_paths.append("/data/yangchen/blender_files/free_view_test_trajectory.json")
    target_traj_paths.append("/data/yangchen/blender_files/traj_man_made_post.json")

    # Output split name for each trajectory file (parallel to target_traj_paths).
    mode_list = ["train", "val", "test", "free_test", "free"]

    for idx, target_traj_path in enumerate(target_traj_paths):
        # NOTE: the dataset needs high-quality rendering (final denoise and
        # sample settings) to serve as training data — it is rather slow.
        traj = load_camera_trajectory(target_traj_path)

        export_view_cells(VIEWS=len(traj),
                          traj=traj,
                          MODE=mode_list[idx],
                          SCENE_NAME='barbershop',
                          CAM_NAME="Camera",
                          RESOLUTION_X=800,
                          RESOLUTION_Y=800,
                          SKIP_EXISTING_FILES=True)