# import --------------
import os
import time

import mitsuba as mi
import numpy as np

import config
import normal_converter as nc
import util as ut

# ---------------------

"""
    Using CUDA variant.
"""
mi.set_variant('cuda_ad_rgb')

"""
    2/17
    We have four material textures in dataset, namely 
        'diffuse albedo',
        'diffuse normal',
        'specular albedo',
        'specular normal',
    Firstly, we should edit a default scene for texture-space-based rendering.
    
    2/18
    Scene has been edited and saved in 'textureSpace_diffuse_scene.xml'
    Consider that the render mode should adapt to Mitsuba's form, we can render the diffuse part and 
    specular part separately and finally add up together.
    * It should be noted that this process way may be not differentiable.
    
    2/19
    Since we use 'point light' for our first experiment, 'direct' integrator should be selected instead of 'path' one,
    or it will end up with black result.
    BUT, I find that 'direct' integrator can not simulate reflection, so we should use 'path' integrator corresponding
    with a shaped light source that can be detected by path-ray.
    Noted that the normal map stored in RealFaceDB is in object space,which is very different from tangent space normal.
    We should find some way to get the information from 'shape texture' to calculate tangent space normal.
    
    2/20
    Since the normal maps given by RealFaceDB are in object space, we should convert them into tangent space via the
    shape texture stored in RealFaceDB, which indicates the object-space coordinates of the shape mapped into [0, 255].

    Get the value from shape-patch's corner, and calculate the TBN matrix.
    Considering that the shape represented by this shape-patch map may be curved, the result may not accurate.

    available variant:
    scalar_rgb, scalar_spectral, cuda_ad_rgb, llvm_ad_rgb
    
    2/21
    There are still two obstacles standing on the way. 
    One is the excess of range [0, 1] in converted normal map.
    Another is the visualization error of render result via converted normal map.
    
    The first obstacle, the 'excess', confuses me because IT STILL HAPPENED even after applying [np.clip] to [0, 1]
    before passing the map to the scene's parameters. I have no idea what causes this exception.
    
    The second obstacle, the 'visualization error', happened after I had converted the object-space map into a
    tangent-space map.
        I test my tangent-space normal map in Blender and find that the render result is greatly incorrect! My 
        calculation process should not be wrong, so the reason may mainly be the incorrect usage of shape map.
        
        Let's change our mind and think about 'tangent space'. We convert object-space into tangent-space using the 'TBN'
        which consists of three basic vectors : 'tangent', 'bi-tangent', 'normal'. The normal of a certain plane is 
        always fixed, so the choice of 'tangent' and 'bi-tangent' really affect the 'TBN' matrix's value. Different 
        choice of these two vectors will lead to different convert of normal, always resulting in rotation error. And an
        important clue I got in Blender result is that the normal's overall trend has been greatly altered to wrong 
        direction. These phenomenon may probably caused by the incorrect choice of 'tangent'.
        
    I'm surprised to find that these two obstacles have been resolved after I solved the tangent-space rotation problem. 
    
    
    2/22
    The tangent normal conversion is STILL not correct.
       After changing the origin of the tangent-space selection, the conversion result is still not correct. 
       My conversion based on the assumption that:
            * Shape coordinates has been normalized into [0, 1] and can be straightly used to calculate the tangent,
              bi-tangent and normal.
            * The shape represented by the given shape-map patch may be a plane.
    Or we can convert the normal map in finely-grained degree.
    
    Reading paper of Position map and MaterialGAN
    
    2/23
    It suddenly hit me that, I don't need to convert the normal map into any other form. I can just render it on the
    flat plane, 
    
"""


def update_other_parameter() -> object:
    """
    Placeholder: update the remaining scene parameters, e.g.:
        * camera fov
        * light radiance
        * etc.
    The scene is expected to be represented as a 'dict'.

    Not yet implemented; currently returns None.
    """
    ...


def diffuse_specular_render(_index, file_type='png', spp=512) -> mi.TensorXf:
    """
       Render skin via a Bi-directional Reflectance Distribution Function model.

       Resources stored in RealFaceDB are numbered. Given an index, we load all
       the resources corresponding to that index.

       :param _index: numeric index of the RealFaceDB entry to render.
       :param file_type: texture file extension (default 'png').
       :param spp: samples per pixel passed to the renderer.
       :return: the rendered image as mi.TensorXf.
    """
    '''
        Load corresponding resource.
        Naming convention is followed by RealFaceDB. 
        Five resources need to be loaded:
            * diffuse albedo
            * diffuse normal
            * specular albedo
            * specular normal
            * shape
    '''
    # Confirm filepath. (Assumes config.resource_base_path ends with a path
    # separator — TODO confirm against config.)
    filename = ut.to_db_number_type(index=_index) + "." + file_type
    diffuse_reflectance_map_filepath = config.resource_base_path + 'diffAlbedo' + os.sep + filename
    diffuse_normal_map_filepath = config.resource_base_path + 'diffNormals' + os.sep + filename
    specular_reflectance_map_filepath = config.resource_base_path + 'specAlbedo' + os.sep + filename
    specular_normal_map_filepath = config.resource_base_path + 'specNormals' + os.sep + filename
    shape_map_filepath = config.resource_base_path + 'shape' + os.sep + filename
    if config.Using_specNormals:                            # Need to determine whether using specNormal or not.
        normal_map_filepath = specular_normal_map_filepath
    else:
        normal_map_filepath = diffuse_normal_map_filepath

    # Load map data in TensorXf format.
    diffuse_reflectance_tensor = mi.TensorXf(mi.Bitmap(mi.filesystem.path(diffuse_reflectance_map_filepath)))
    specular_reflectance_tensor = mi.TensorXf(mi.Bitmap(mi.filesystem.path(specular_reflectance_map_filepath)))
    # Add a trailing channel axis so the specular map matches the layout the
    # scene parameter expects (presumably it is stored single-channel — verify).
    specular_reflectance_tensor = mi.TensorXf(np.expand_dims(np.array(specular_reflectance_tensor), axis=-1))
    if config.convert_normal_into_tangent:
        # Object-space -> tangent-space conversion via the shape map.
        normal_map_tensor = nc.convert_normal_map(normal_map_filepath, shape_map_filepath)
    else:
        normal_map_tensor = mi.TensorXf(mi.Bitmap(mi.filesystem.path(normal_map_filepath)))
        # Transform normal map value interval from [0, 255] into [-1, 1)
        normal_map_tensor = mi.TensorXf((np.array(normal_map_tensor) / config.PNG_CHANNELS) * 2 - 1)

    # Transform reflectance map value interval from [0, 255] into [0, 1).
    # This MUST happen before the optional specular inversion below.
    diffuse_reflectance_tensor = mi.TensorXf(np.array(diffuse_reflectance_tensor) / config.PNG_CHANNELS)
    specular_reflectance_tensor = mi.TensorXf(np.array(specular_reflectance_tensor) / config.PNG_CHANNELS)

    if config.invert_specular:
        # Bug fix: invert AFTER normalization. Previously `1 - x` ran on the
        # raw [0, 255] data and the subsequent division by PNG_CHANNELS
        # produced mostly-negative reflectance values ((1 - x) / 255 instead
        # of 1 - x / 255).
        specular_reflectance_tensor = mi.TensorXf(1 - np.array(specular_reflectance_tensor))

    '''
        Load Scene and manipulate parameters. Set:
            * diffuse albedo
            * specular albedo
            * tangent_normal
    '''
    # Load scene and obtain its parameter.
    scene = mi.load_file(config.scene_base_path + 'custom_scene/textureSpace_BRDF_scene.xml')
    params = mi.traverse(scene)

    # Manipulate scene's parameters; params.update() propagates the edits
    # back into the loaded scene.
    params[config.diffuse_albedo_key] = diffuse_reflectance_tensor
    params[config.specular_albedo_key] = specular_reflectance_tensor
    params[config.normal_map_key] = normal_map_tensor
    params.update()

    return mi.render(scene=scene, spp=spp)


if __name__ == "__main__":
    if not os.path.exists(config.output_path):
        os.makedirs(config.output_path, exist_ok=True)

    '''
        Notice the output filepath.
    '''
    print("Notice: result will output in :", config.output_path)
    for index in range(config.render_quantity):
        image = diffuse_specular_render(_index=index)

        identity = int(time.time()) - 1676942461

        mi.util.write_bitmap(config.output_path + str(index) + ".png", image)


"""
    CODE TOMB
    
def diffuse_part_render() -> mi.TensorXf:
    '''
        Load scene.
        It would be a good option that edit scene in code for dynamic usage.

        * Please notice the memory structure of Mitsuba.
    '''
    scene = mi.load_file(data_basePath + 'custom_scene/textureSpace_diffuse_scene.xml')
    params = mi.traverse(scene)

    '''
        Calculate the tangent space normal map and replace it in scene.
    '''
    tangent_normal_tensor = nc.convert_normal_map(
        'data/texture/specNormals/00000.png',
        './data/texture/shape/00000.png')
    params['texture.normalmap.data'] = tangent_normal_tensor
    params.update()

    # render
    return mi.render(scene, spp=256)


def specular_part_render() -> mi.TensorXf:
    '''
        Load scene.
    '''
    scene = mi.load_file(data_basePath + 'custom_scene/textureSpace_specular_scene.xml')
    params = mi.traverse(scene)

    '''
        Calculate the tangent space normal map and replace it in scene.
    '''
    tangent_normal_tensor = nc.convert_normal_map(
        'data/texture/specNormals/00000.png',
        './data/texture/shape/00000.png')
    print(params['texture.normalmap.data'])
    params['texture.normalmap.data'] = tangent_normal_tensor
    params.update()

    return mi.render(scene, spp=512)
"""
