import blenderproc as bproc
import argparse
import os
import numpy as np
from mathutils import Matrix
import pdb
import bpy
import math


""""

set the material of  'new_objects'

"""
def set_material(new_objects):
    # * load material 
    mat = bproc.material.create("charging_arm_material")
    image = bpy.data.images.load(filepath=new_obj_path.replace('obj', 'jpg'))
    mat.set_principled_shader_value("Base Color", image)
    #* set material 
    for obj in new_objects:
        if not obj.get_materials():
            obj.add_material(mat)

        
def equator_cameras_sequence(i, num_views, allow_theta_fixed=False, return_radians=True):
    """Compute the (phi, theta) spherical angles for camera ``i`` of ``num_views``.

    ``phi`` (azimuth) is distributed uniformly over the full 360-degree circle.
    When ``allow_theta_fixed`` is True, ``theta`` (elevation) sweeps linearly
    from -20 up to (but not including) +20 degrees across the sequence;
    otherwise ``theta`` is 0, i.e. the camera stays on the equatorial plane.

    Parameters:
        i (int): index of the current camera (0-based).
        num_views (int): total number of camera positions.
        allow_theta_fixed (bool): vary theta linearly in [-20, 20) degrees
            instead of pinning it to the equator.
        return_radians (bool): convert both angles to radians (default True).

    Returns:
        list: ``[phi, theta]`` in degrees, or in radians when
        ``return_radians`` is True.
    """
    phi = (360 / num_views) * i  # uniform azimuth spacing
    if allow_theta_fixed:
        # Deterministic linear sweep: i=0 -> -20 deg, i=num_views -> +20 deg.
        theta = (20 - (-20)) * i / num_views + (-20)
    else:
        theta = 0  # equatorial plane

    if return_radians:
        phi = math.radians(phi)
        theta = math.radians(theta)

    return [phi, theta]


def render_around(num_views=40, radius=2.0, fov_degrees=60.0):
    """Place ``num_views`` cameras on a ring around the origin and render once.

    Cameras are positioned via :func:`equator_cameras_sequence` (phi uniform,
    theta sweeping linearly), each looking back at the origin.

    Bug fixed: the original looped ``while cam_poses < num_views`` with the
    pose-sampling branch disabled (``sample_around = False``), so the counter
    never advanced and ``bproc.renderer.render()`` was invoked on every
    iteration of an infinite loop. Rendering now happens exactly once, after
    all poses are registered.

    Parameters:
        num_views (int): number of camera poses to place (was hard-coded 40).
        radius (float): camera distance from the origin (was hard-coded 2.0).
        fov_degrees (float): horizontal field of view used to derive the
            focal length (was hard-coded 60).

    Returns:
        The BlenderProc render result dict.
    """
    fov = math.radians(fov_degrees)
    # Intrinsics are the same for every pose -- set them once, outside the loop.
    # 16 / tan(fov/2): focal length in millimeters for the assumed sensor size.
    focal_length = 16 / np.tan(fov / 2)
    bproc.camera.set_intrinsics_from_blender_params(lens=focal_length, lens_unit='MILLIMETERS')

    for view_idx in range(num_views):
        # Spherical angles for this view (theta sweeps when allow_theta_fixed).
        phi, theta = equator_cameras_sequence(view_idx, num_views, allow_theta_fixed=True, return_radians=True)

        # Spherical -> Cartesian camera position.
        location = (
            radius * np.cos(phi) * np.cos(theta),
            radius * np.sin(phi) * np.cos(theta),
            radius * np.sin(theta),
        )

        # Orient the camera to look back toward the origin.
        rotation_matrix = bproc.camera.rotation_from_forward_vec(-np.array(location), inplane_rot=0)
        cam2world_matrix = bproc.math.build_transformation_mat(location, rotation_matrix)
        bproc.camera.add_camera_pose(cam2world_matrix)

    # Render all registered poses in a single pass.
    return bproc.renderer.render()
        

# ---------------------------------------------------------------------------
# Script entry: parse CLI arguments and build the static parts of the scene.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('bop_parent_path', help="Path to the bop datasets parent directory")
# NOTE(review): ``default=`` on a positional argument without nargs='?' is
# ignored by argparse -- the value must always be supplied on the CLI.
parser.add_argument('cc_textures_path', default="resources/cctextures", help="Path to downloaded cc textures")
parser.add_argument('output_dir', help="Path to where the final files will be saved ")
parser.add_argument('--num_scenes', type=int, default=2000, help="How many scenes with 25 images each to generate")
args = parser.parse_args()

# Must run before any other bproc call.
bproc.init()

bproc.camera.set_resolution(1024, 1024)


# load bop objects into the scene (mm2m converts BOP millimeter models to meters)
target_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'lm'), mm2m = True)

# load distractor bop objects from three additional BOP datasets
tless_dist_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'tless'), model_type = 'cad', mm2m = True)
ycbv_dist_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'ycbv'), mm2m = True)
tyol_dist_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'tyol'), mm2m = True)

# load BOP dataset intrinsics (camera parameters of the 'lm' dataset)
bproc.loader.load_bop_intrinsics(bop_dataset_path = os.path.join(args.bop_parent_path, 'lm'))

# set shading and hide all loaded objects; per-scene sampling unhides a subset
for obj in (target_bop_objs + tless_dist_bop_objs + ycbv_dist_bop_objs + tyol_dist_bop_objs):
    obj.set_shading_mode('auto')
    obj.hide(True)


# create room: a floor plane plus four walls tilted upright around it
room_planes = [bproc.object.create_primitive('PLANE', scale=[2, 2, 1]),
               bproc.object.create_primitive('PLANE', scale=[2, 2, 1], location=[0, -2, 2], rotation=[-1.570796, 0, 0]),
               bproc.object.create_primitive('PLANE', scale=[2, 2, 1], location=[0, 2, 2], rotation=[1.570796, 0, 0]),
               bproc.object.create_primitive('PLANE', scale=[2, 2, 1], location=[2, 0, 2], rotation=[0, -1.570796, 0]),
               bproc.object.create_primitive('PLANE', scale=[2, 2, 1], location=[-2, 0, 2], rotation=[0, 1.570796, 0])]

# ceiling light plane; its emissive color/strength is randomized per scene
light_plane = bproc.object.create_primitive('PLANE', scale=[3, 3, 1], location=[0, 0, 10])
light_plane.set_name('light_plane')
light_plane_material = bproc.material.create('light_material')

# point light; its color and shell position are randomized per scene
light_point = bproc.types.Light()
light_point.set_energy(200)

# load cc_textures used to randomize the room-plane materials
cc_textures = bproc.loader.load_ccmaterials(args.cc_textures_path)

# Define a function that samples 6-DoF poses
def sample_pose_func(obj: bproc.types.MeshObject):
    """Sample a random 6-DoF pose for ``obj`` inside a jittered box volume.

    The lower and upper corners of the box are themselves drawn uniformly at
    random, then the location is drawn uniformly between them; the rotation is
    uniform over SO(3).
    """
    # Renamed from ``min``/``max`` to avoid shadowing the Python builtins.
    lower = np.random.uniform([-0.3, -0.3, 0.0], [-0.2, -0.2, 0.0])
    upper = np.random.uniform([0.2, 0.2, 0.4], [0.3, 0.3, 0.6])
    obj.set_location(np.random.uniform(lower, upper))
    obj.set_rotation_euler(bproc.sampler.uniformSO3())
    
# activate depth rendering without antialiasing and set amount of samples for
# color rendering (50 samples trades some noise for render speed)
bproc.renderer.enable_depth_output(activate_antialiasing=False)
bproc.renderer.set_max_amount_of_samples(50)

# ---------------------------------------------------------------------------
# Main loop: for each scene, sample objects/lights/textures/poses, place
# cameras, render, and write the result in BOP format.
# ---------------------------------------------------------------------------
for i in range(args.num_scenes):

    # Sample bop objects for a scene
    # tgt_obj_num = 15
    tgt_obj_num = 1
    sampled_target_bop_objs = list(np.random.choice(target_bop_objs, size=tgt_obj_num, replace=False))

    # new_obj_path = "data/charging_arm/oriented/textured_output.obj"
    # new_obj_path = "data/charge_oriented.glb"
    new_obj_path = "data/oriented2/mesh.obj"

    # NOTE(review): the same mesh file is re-loaded on every scene iteration;
    # loaded copies from previous iterations are not visibly deleted here.
    new_objects = bproc.loader.load_obj(filepath=new_obj_path)

    print('original object number is %d'%(len(sampled_target_bop_objs)))
    sampled_target_bop_objs.extend(new_objects)
    print('now, object number is %d'%(len(sampled_target_bop_objs)))

    # Tag the custom charging-arm object so the BOP writer can identify it.
    for obj in new_objects:

        obj.set_cp("category_id", 0)  # category ID for annotations
        obj.set_cp("is_custom_object", True)  # mark as a custom (non-BOP) object
        obj.set_name("charging_arm")  # object name
        obj.set_cp("inst_mark", "charging_arm")
        # Pretend it belongs to 'lm' so the material-randomization branch and
        # BOP writer treat it like the other target objects.
        obj.set_cp("bop_dataset_name", "lm")
        # obj.set_shading_mode('auto')
        # obj.hide(False)


    # distactor_num = 3
    distactor_num = 1
    # Distractors: 1 from tless; ycbv/tyol sampling is currently disabled (size=0).
    sampled_distractor_bop_objs = list(np.random.choice(tless_dist_bop_objs, size=distactor_num, replace=False))
    sampled_distractor_bop_objs += list(np.random.choice(ycbv_dist_bop_objs, size=0, replace=False))
    sampled_distractor_bop_objs += list(np.random.choice(tyol_dist_bop_objs, size=0, replace=False))

    # Randomize materials and set physics
    for obj in (sampled_target_bop_objs + sampled_distractor_bop_objs): 
        try:
            mat_list = obj.get_materials()
            if len(mat_list) > 0 :
                mat = mat_list[0]
                # itodd/tless models are untextured CAD meshes: give them a random grey
                if obj.get_cp("bop_dataset_name") in ['itodd', 'tless']:
                    grey_col = np.random.uniform(0.1, 0.9)   
                    mat.set_principled_shader_value("Base Color", [grey_col, grey_col, grey_col, 1])        
                mat.set_principled_shader_value("Roughness", np.random.uniform(0, 1.0))
                mat.set_principled_shader_value("Specular IOR Level", np.random.uniform(0, 1.0))
            else:
                print('there is not material in this object')
            obj.hide(False)
        except Exception as e :
            print(e)
            # NOTE(review): interactive debugger left in the pipeline -- this
            # will hang an unattended batch run; remove for production.
            pdb.set_trace()


    # Sample two light sources: emissive ceiling plane + point light on a shell
    light_plane_material.make_emissive(emission_strength=np.random.uniform(3,6), 
                                    emission_color=np.random.uniform([0.5, 0.5, 0.5, 1.0], [1.0, 1.0, 1.0, 1.0]))  
    light_plane.replace_materials(light_plane_material)
    light_point.set_color(np.random.uniform([0.5,0.5,0.5],[1,1,1]))
    location = bproc.sampler.shell(center = [0, 0, 0], radius_min = 1, radius_max = 1.5,
                            elevation_min = 5, elevation_max = 89)
    light_point.set_location(location)

    # sample CC Texture and assign to room planes
    random_cc_texture = np.random.choice(cc_textures)
    for plane in room_planes:
        plane.replace_materials(random_cc_texture)


    # Sample object poses and check collisions
    # NOTE(review): these free-space poses are immediately overwritten by
    # sample_poses_on_surface below -- confirm this call is still needed.
    bproc.object.sample_poses(objects_to_sample = sampled_target_bop_objs + sampled_distractor_bop_objs,
                            sample_pose_func = sample_pose_func, 
                            max_tries = 1000)
            
    # Define a function that samples the initial pose of a given object above the ground
    def sample_initial_pose(obj: bproc.types.MeshObject):
        # Drop point above the central region of the floor plane, with a random yaw.
        obj.set_location(bproc.sampler.upper_region(objects_to_sample_on=room_planes[0:1],
                                                    min_height=1, max_height=4, face_sample_range=[0.4, 0.6]))
        obj.set_rotation_euler(np.random.uniform([0, 0, 0], [0, 0, np.pi * 2]))

    # Sample objects on the given surface (this determines the final poses)
    placed_objects = bproc.object.sample_poses_on_surface(objects_to_sample=sampled_target_bop_objs + sampled_distractor_bop_objs,
                                                          surface=room_planes[0],
                                                          sample_pose_func=sample_initial_pose,
                                                          min_distance=0.01,
                                                          max_distance=0.2)

    # BVH tree used for camera obstacle checks
    bop_bvh_tree = bproc.object.create_bvh_tree_multi_objects(sampled_target_bop_objs + sampled_distractor_bop_objs)

    # Collect num_views valid camera poses for this scene.
    # NOTE(review): if the obstacle check below keeps failing, this loop never
    # terminates -- consider a max-attempts guard.
    num_views = 40
    cam_poses = 0
    
    while cam_poses < num_views:
    
        sample_front = True 
        #!=====================================================================================================================
        if sample_front :
            # Sample the camera on a shell centered in front of the custom object:
            # the object's local +X column of its world rotation is taken as its
            # facing direction and negated to step out in front of it.
            H_m2w = Matrix(new_objects[0].get_local2world_mat())
            rotation_matrix = H_m2w.to_3x3()
            forward_vec = rotation_matrix.col[0]  
            forward_vec = -forward_vec  
            distance_in_front = 0.8  # Adjust the distance as needed

            
            location = new_objects[0].get_location() + forward_vec * distance_in_front
            location = bproc.sampler.shell(center = location,
                                    radius_min = 0.25,
                                    radius_max = 1,
                                    elevation_min = 5,
                                    elevation_max = 89)
            
        #!=====================================================================================================================

        # Alternative (disabled): sample the camera anywhere on a shell around the origin.
        sample_camera_randomly = False 
        if sample_camera_randomly:
            location = bproc.sampler.shell(center = [0, 0, 0],
                                    radius_min = 0.25,
                                    radius_max = 0.8,
                                    elevation_min = 5,
                                    elevation_max = 89)
            
        #!======================================================================================================== 

        #* set point of interest
        # Determine point of interest in scene as the object closest to the mean of a subset of objects 
        # poi = bproc.object.compute_poi(np.random.choice(sampled_target_bop_objs, size=10, replace=False))
        poi = bproc.object.compute_poi(new_objects)
        #!========================================================================================================
        # Compute rotation based on vector going from location towards poi,
        # with a random in-plane roll of +/- 45 degrees.
        rotation_matrix = bproc.camera.rotation_from_forward_vec(poi - location, inplane_rot=np.random.uniform(-0.7854, 0.7854))
        # Add homog cam pose based on location an rotation
        cam2world_matrix = bproc.math.build_transformation_mat(location, rotation_matrix)
        
        # Check that obstacles are at least 0.3 meter away from the camera and make sure the view interesting enough
        if bproc.camera.perform_obstacle_in_view_check(cam2world_matrix, {"min": 0.3}, bop_bvh_tree):
            # Persist camera pose
            bproc.camera.add_camera_pose(cam2world_matrix, frame=cam_poses)
            cam_poses += 1

    # render the whole pipeline
    data = bproc.renderer.render()

    # Write data in bop format
    #* frames_per_chunk: the number of image in one directory
    # NOTE(review): 1e+10 is a float passed where a count is expected --
    # presumably intended as "everything in one chunk"; confirm the writer
    # accepts a float here.
    bproc.writer.write_bop(os.path.join(args.output_dir, 'bop_data'),
                        #    target_objects = sampled_target_bop_objs,
                           target_objects = new_objects,
                           dataset = 'lm',
                           depth_scale = 1,
                           depths = data["depth"],
                           colors = data["colors"], 
                           color_file_format = "JPEG",
                           ignore_dist_thres = 10,frames_per_chunk = 1e+10)

    # Re-hide the sampled objects so the next scene starts from a clean state.
    for obj in (sampled_target_bop_objs + sampled_distractor_bop_objs):
        obj.hide(True)