# pyrealsense2 is required.
# Please see instructions in https://github.com/IntelRealSense/librealsense/tree/master/wrappers/python
import pyrealsense2 as rs
import numpy as np
import cv2
import argparse
from os import makedirs
from os.path import exists, join
import shutil
import json
from enum import IntEnum

try:
    # Python 2 compatibility: alias the builtin `input` to `raw_input` so
    # user prompts behave the same on both major versions. On Python 3
    # `raw_input` does not exist, so the NameError is swallowed and the
    # native `input` builtin is used unchanged.
    input = raw_input
except NameError:
    pass


class Preset(IntEnum):
    """Depth-sensor visual presets, passed to ``rs.option.visual_preset``.

    NOTE(review): the integer values are assumed to match librealsense's
    RS400 visual-preset option values — confirm against the installed SDK
    version, since preset numbering has changed between releases.
    """
    Custom = 0
    Default = 1
    Hand = 2
    HighAccuracy = 3
    HighDensity = 4
    MediumDensity = 5


def make_clean_folder(path_folder):
    """Ensure `path_folder` exists and is empty.

    Deletes the folder (and all of its contents) if it already exists,
    then recreates it. Prints the path for progress visibility.

    Args:
        path_folder: Path of the directory to (re)create.
    """
    print(path_folder)
    # Remove any previous contents, then create exactly once — the original
    # called makedirs() in both branches.
    if exists(path_folder):
        shutil.rmtree(path_folder)
    makedirs(path_folder)


def save_intrinsic_as_json(filename, frame):
    """Dump the frame's pinhole camera intrinsics to `filename` as JSON.

    The 3x3 intrinsic matrix is flattened in column-major order
    ([fx, 0, 0, 0, fy, 0, ppx, ppy, 1]), the layout expected by
    Open3D-style camera-intrinsic JSON files.

    Args:
        filename: Destination path for the JSON file.
        frame: A RealSense frame whose stream profile carries intrinsics.
    """
    intr = frame.profile.as_video_stream_profile().intrinsics
    payload = {
        'width': intr.width,
        'height': intr.height,
        'intrinsic_matrix': [
            intr.fx, 0, 0,
            0, intr.fy, 0,
            intr.ppx, intr.ppy, 1,
        ],
    }
    with open(filename, 'w') as out:
        json.dump(payload, out, indent=4)


def record(path_output):
    """Record aligned color + depth frames from a RealSense camera to disk.

    Creates (clean) depth/ and color/ subfolders under `path_output`, then
    streams up to `total_frame` frame pairs, saving each as
    depth/NNNNNN.png (16-bit depth) and color/NNNNNN.jpg, writing the color
    intrinsics once to camera_intrinsic.json, and showing a live preview.
    Press Esc in the preview window to stop early.

    Args:
        path_output: Root folder for the recording session (wiped if present).
    """
    total_frame=10*20

    path_depth = join(path_output, "depth")
    path_color = join(path_output, "color")
    make_clean_folder(path_output)
    make_clean_folder(path_depth)
    make_clean_folder(path_color)

    pipeline = rs.pipeline()

    # Create a config and configure the pipeline to stream
    #  different resolutions of color and depth streams
    config = rs.config()

    # note: using 640 x 480 depth resolution produces smooth depth boundaries
    #       using rs.format.bgr8 for color image format for OpenCV based image visualization
    config.enable_stream(rs.stream.depth,848, 480, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 848, 480, rs.format.bgr8, 30)
    # config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

    # Start streaming
    profile = pipeline.start(config)
    depth_sensor = profile.get_device().first_depth_sensor()
    # Enable depth auto-exposure (added on top of the original recorder).
    depth_sensor.set_option(rs.option.enable_auto_exposure, 1)  # begin added post-processing setup
    # Post-processing filter objects. NOTE(review): only `spatial` and
    # `temporal` are applied in the loop below; `disparity_to_depth`,
    # `decimation` and `hole_filling` are configured but unused — the
    # corresponding .process() calls are commented out further down.
    disparity_to_depth = rs.disparity_transform(transform_to_disparity=False)
    decimation = rs.decimation_filter()
    spatial = rs.spatial_filter()
    spatial.set_option(rs.option.filter_magnitude, 5)
    spatial.set_option(rs.option.filter_smooth_alpha, 1)
    spatial.set_option(rs.option.filter_smooth_delta, 50)
    spatial.set_option(rs.option.holes_fill, 3)
    decimation.set_option(rs.option.filter_magnitude, 4)
    temporal = rs.temporal_filter()
    hole_filling = rs.hole_filling_filter()
    # End of added post-processing setup.

    # Using preset HighAccuracy for recording
    depth_sensor.set_option(rs.option.visual_preset, Preset.HighAccuracy)

    # Getting the depth sensor's depth scale (see rs-align example for explanation)
    depth_scale = depth_sensor.get_depth_scale()
    print ("depth scale in meters=====================",depth_scale)
    # We will not display the background of objects more than
    #  clipping_distance_in_meters meters away
    clipping_distance_in_meters = 4  # 4 meters
    # Convert the metric threshold into raw depth units for comparison
    # against the uint16 depth image.
    clipping_distance = clipping_distance_in_meters / depth_scale
    print("clipping dis", clipping_distance)

    # Create an align object
    # rs.align allows us to perform alignment of depth frames to others frames
    # The "align_to" is the stream type to which we plan to align depth frames.
    align_to = rs.stream.color
    align = rs.align(align_to)

    # Streaming loop
    frame_count = 0
    while frame_count <= total_frame:
        # Get frameset of color and depth
        frames = pipeline.wait_for_frames()

        # Align the depth frame to color frame
        aligned_frames = align.process(frames)

        # Get aligned frames
        aligned_depth_frame = aligned_frames.get_depth_frame()
        color_frame = aligned_frames.get_color_frame()

        # Validate that both frames are valid
        if not aligned_depth_frame or not color_frame:
            continue
        # Added per-frame post-processing (spatial + temporal smoothing).
        # The remaining filters are left disabled below.
        #            aligned_depth_frame = decimation.process(aligned_depth_frame)
        #            aligned_depth_frame= depth_to_disparity.process(aligned_depth_frame)
        aligned_depth_frame = spatial.process(aligned_depth_frame)
        aligned_depth_frame = temporal.process(aligned_depth_frame)
        # aligned_depth_frame= disparity_to_depth.process(aligned_depth_frame)
        # aligned_depth_frame= hole_filling.process(aligned_depth_frame)

        # End of added per-frame post-processing.
        # NOTE: the images written to disk are the FILTERED depth and raw color.
        depth_image = np.asanyarray(aligned_depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())

        if frame_count == 0:
            # Intrinsics are written once, from the first color frame.
            save_intrinsic_as_json(join(path_output, "camera_intrinsic.json"), color_frame)

        cv2.imwrite("%s/%06d.png" % \
                    (path_depth, frame_count), depth_image)
        cv2.imwrite("%s/%06d.jpg" % \
                    (path_color, frame_count), color_image)
        print("Saved color + depth image %06d" % frame_count)
        frame_count += 1

        # Remove background - Set pixels further than clipping_distance to grey
        # (preview only — the saved images above are NOT background-removed).
        grey_color = 153
        # depth image is 1 channel, color is 3 channels
        depth_image_3d = np.dstack((depth_image, depth_image, depth_image))
        bg_removed = np.where((depth_image_3d > clipping_distance) | \
                              (depth_image_3d <= 0), grey_color, color_image)

        # Render images
        depth_colormap = cv2.applyColorMap(
            cv2.convertScaleAbs(depth_image, alpha=0.09), cv2.COLORMAP_JET)
        images = np.hstack((bg_removed, depth_colormap))
        cv2.namedWindow('Recorder Realsense', cv2.WINDOW_AUTOSIZE)
        cv2.imshow('Recorder Realsense', images)
        key = cv2.waitKey(1)

        # if 'esc' button pressed, escape loop and exit program
        if key == 27:
            cv2.destroyAllWindows()
            break
    pipeline.stop()
