# SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# 
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Copyright (c) 2021 ETH Zurich, Nikita Rudin

# from legged_gym.envs.base.legged_robot_config import LeggedRobotCfg, LeggedRobotCfgPPO
from legged_gym.envs.nav_env.nav_base_config import LeggedNavCfg, LeggedNavCfgPPO

class AliengoNavSingleFTV2Cfg( LeggedNavCfg ):
    """Navigation-task config for the Unitree Aliengo, single-terrain fine-tune (V2).

    Overrides fields of the project-level LeggedNavCfg base; every nested class
    mirrors a section of that base config. All attributes here are read by the
    training framework, so names and values are part of the external interface.
    """

    class env( LeggedNavCfg.env ):
        # Simulation / observation-space sizing.
        episode_length_s = 80
        # num_envs = 3012
        num_envs = 6144
        n_scan = 132  # height-scan points: 12 x 11 grid (see terrain.measured_points_x/y below)
        n_priv = 3 
        n_priv_latent = 4 + 1 + 12 + 12  # = 29 privileged latent dims
        n_proprio = 3 + 2 + 1 + 2 + 3 + 36 + 4 # = 51 (NOTE(review): original comment claimed 54) no time remain, add num goals (one hot)
        # n_proprio = 3 + 2 + 3 + 36 + 4
        history_len = 10  # number of stacked past proprio frames (history_len * n_proprio values)
        wp_info_dims = 8
        num_observations = n_proprio + n_scan + history_len * n_proprio + n_priv_latent + n_priv # 51 + 132 + 10*51 + 29 + 3 = 725
        
        randomize_start_yaw = True
        # rand_yaw_range = 1.5
        reach_goal_delay = 2
        
    class init_state( LeggedNavCfg.init_state ):
        # Spawn pose and nominal joint configuration.
        pos = [0.0, 0.0, 0.42] # x,y,z [m]  add 5cm in z 
        default_joint_angles = { # = target angles [rad] when action = 0.0
            'FL_hip_joint': 0.1,   # [rad]
            'RL_hip_joint': 0.1,   # [rad]
            'FR_hip_joint': -0.1 ,  # [rad]
            'RR_hip_joint': -0.1,   # [rad]

            'FL_thigh_joint': 0.8,     # [rad]
            'RL_thigh_joint': 1.,   # [rad]
            'FR_thigh_joint': 0.8,     # [rad]
            'RR_thigh_joint': 1.,   # [rad]

            'FL_calf_joint': -1.5,   # [rad]
            'RL_calf_joint': -1.5,    # [rad]
            'FR_calf_joint': -1.5,  # [rad]
            'RR_calf_joint': -1.5,    # [rad]
        }

    class depth( LeggedNavCfg.depth ):
        # Onboard depth-camera settings (disabled for this config: use_camera = False).
        use_camera = False
        stack_depth = False
        camera_num_envs = 192
        camera_terrain_num_rows = 10
        camera_terrain_num_cols = 10

        # upside camera
        # position = [0.331, 0, 0.031]  
        # angle = [-25,-15]  # positive pitch down
        
        # downside camera
        position = [0.331, 0, -0.034]  # camera offset in base frame [m] — presumably x,y,z; verify against mount
        angle = [9, 19]  # pitch randomization range [deg]; positive pitch down (per commented variant above)

        update_interval = 5  # in sim steps; 5 works without retraining, 8 worse

        original = (106, 60)   # raw depth image resolution (w, h)
        resized = (87, 58)     # resolution fed to the policy (w, h)
        horizontal_fov = 87
        buffer_len = 2
        
        near_clip = 0   # [m]
        far_clip = 2    # [m]
        dis_noise = 0.0
        
        scale = 1
        invert = True

    class control( LeggedNavCfg.control ):
        # PD Drive parameters:
        control_type = 'P'
        # stiffness = {'joint': 20.}  # [N*m/rad]
        # damping = {'joint': 0.5}     # [N*m*s/rad]
        stiffness = {'joint': 40.}  # [N*m/rad]
        damping = {'joint': 2.}     # [N*m*s/rad]
        action_scale = 0.25
        decimation = 4  # control updates per policy step
    
    class terrain( LeggedNavCfg.terrain ):
        # Procedural navigation-terrain generation parameters.
        mesh_type = 'trimesh' # "heightfield" # none, plane, heightfield or trimesh
        hf2mesh_method = "grid"  # grid or fast
        max_error = 0.1 # for fast
        max_error_camera = 2
        waypoint_info_dim = 0
        y_range = [-0.4, 0.4]
        x_range = [1.5, 2.4]
        
        edge_width_thresh = 0.05
        horizontal_scale = 0.05 # [m] influence computation time by a lot
        horizontal_scale_camera = 0.1
        vertical_scale = 0.005 # [m]
        
        height = [0.02, 0.06]
        simplify_grid = False
        gap_size = [0.02, 0.1]
        stepping_stone_distance = [0.02, 0.08]
        downsampled_scale = 0.075
        curriculum = True
        diy = False

        all_vertical = False
        no_flat = True
        
        static_friction = 1.0
        dynamic_friction = 1.0
        restitution = 0.
        measure_heights = True
        # 12 x-points by 11 y-points = 132 scan points (matches env.n_scan).
        measured_points_x = [-0.45, -0.3, -0.15, 0, 0.15, 0.3, 0.45, 0.6, 0.75, 0.9, 1.05, 1.2] # 1mx1.6m rectangle (without center line)
        measured_points_y = [-0.75, -0.6, -0.45, -0.3, -0.15, 0., 0.15, 0.3, 0.45, 0.6, 0.75]
        measure_horizontal_noise = 0.0
        
        # Local (dense) height-scan grid: 7 x 3 = 21 points.
        loca_measured_points_x = [-0.45, -0.3, -0.15, 0., 0.15, 0.3, 0.45]
        # loca_measured_points_y = [-0.45, -0.3, -0.15, 0., 0.15, 0.3, 0.45]
        loca_measured_points_y = [-0.15, 0., 0.15]
        n_loca_points_x = 7  # must match len(loca_measured_points_x)
        n_loca_points_y = 3  # must match len(loca_measured_points_y)

        selected = False # select a unique terrain type and pass all arguments
        terrain_kwargs = None # Dict of arguments for selected terrain
        max_init_terrain_level = 2 # starting curriculum state
        border_size = 0 # [m]
        
        nav_terrain = True
        # sample_width_thresh = 0.1
        # max_difficulty = True
        n_rows_per_terrain = 5
        n_cols_per_terrain = 4
        block_length = 3.0   # [m]
        block_width = 3.0    # [m]
        wall_thickness = 0.15 # [m]
        wall_height = 2.5 # [m]
        # Overall tile size derived from block grid plus surrounding walls.
        nav_terrain_length = (n_rows_per_terrain+2) * block_length + wall_thickness*2
        nav_terrain_width = n_cols_per_terrain * block_width + wall_thickness*2
        # Obstacle types allowed on step-style tiles (stairs excluded here).
        nav_step_options = [
                "forbidden",
                "flat",
                "gap",
                "hurdle",
                "jump_up",
                "jump_down",
                # "stairs_up",
                # "stairs_down",
        ]
        # Obstacle types allowed on stair-style tiles (jumps excluded here).
        nav_stair_options = [
                "forbidden",
                "flat",
                "gap",
                "hurdle",
                # "jump_up",
                # "jump_down",
                "stairs_up",
                "stairs_down",
        ]
        # Harder (previous) obstacle parameterizations kept for reference:
        # hurdle = {"height": (0.1, 0.35), "length": (0.4, 0.5), "fake_offset": (-0, 0.), "goal_offset": (0.5),}
        # gap = {"depth": (0.5, 1.4), "length": (0.1, 0.45), "fake_offset": (-0., 0.), "goal_offset": (0.5),}
        # jump_up = {"height": (0.1, 0.45), "length": (0.3, 0.7), "fake_offset": (-0.5, 0.5),}
        # jump_down = {"depth": (0.1, 0.45), "length": (0.3, 0.7), "fake_offset": (-0.5, 0.5),}
        # discrete = {"num_rects": 5, "max_height": (0.05, 0.08), "min_size": 0.5, "max_size": 1.5}
        # forbidden = {"height": (3., 5.), 'obs_size': (0.2, 1.0),}
        # stairs_up = {"num_steps": 3, "step_height": (0.15, 0.22), "step_len": (0.18, 0.27),}
        # stairs_down = {"num_steps": 3, "step_height": (0.15, 0.22), "step_len": (0.18, 0.27),}
        
        # Active obstacle parameter ranges (min, max); lengths/heights/depths in [m].
        hurdle = {"height": (0.1, 0.25), "length": (0.25, 0.35), "fake_offset": (-0, 0.), "goal_offset": (0.5),}
        gap = {"depth": (0.5, 1.4), "length": (0.1, 0.35), "fake_offset": (-0., 0.), "goal_offset": (0.5),}
        jump_up = {"height": (0.1, 0.32), "length": (0.3, 0.7), "fake_offset": (-0.5, 0.5),}
        jump_down = {"depth": (0.1, 0.32), "length": (0.3, 0.7), "fake_offset": (-0.5, 0.5),}
        discrete = {"num_rects": 5, "max_height": (0.05, 0.08), "min_size": 0.5, "max_size": 1.5}
        forbidden = {"height": (3.5, 5.), 'obs_size': (0.2, 1.0),}
        # forbidden = {"height": (0.05, 0.1), 'obs_size': (0.2, 1.0),}
        stairs_up = {"num_steps": 3, "step_height": (0.15, 0.2), "step_len": (0.18, 0.27),}
        stairs_down = {"num_steps": 3, "step_height": (0.15, 0.2), "step_len": (0.18, 0.27),}
        
        single_fine_tune = True
        virtual = True

        sample_size = 2.5
        if_wall = True

        num_rows = 10 # number of terrain rows (levels)  # spreading envs across rows is beneficial
        num_cols = 10 # number of terrain cols (types)

        # trimesh only:
        slope_treshold = 1.5  # slopes above this threshold will be corrected to vertical surfaces
        sample_slope_treshold = 5
        forbidden_slope_treshold = 60
        origin_zero_z = True
        num_goals = 8  # high limit

    class noise:
        # Observation-noise configuration (fully overrides the base, no inheritance).
        add_noise = True
        noise_level = 1.0 # scales other values
        class noise_scales:
            dof_pos = 0.01
            dof_vel = 1.5
            lin_vel = 0.1
            ang_vel = 0.2
            gravity = 0.05
            contact_filt = 0.0
            imu_noise = 0.0
            height_measurements = 0.1

    class asset( LeggedNavCfg.asset ):
        # Robot URDF and collision/termination body filters.
        file = '{LEGGED_GYM_ROOT_DIR}/resources/robots/aliengo/urdf/aliengo.urdf'
        foot_name = "foot"
        penalize_contacts_on = ["thigh", "calf", "base"]
        terminate_after_contacts_on = ["base"]#, "thigh", "calf"]
        self_collisions = 1 # 1 to disable, 0 to enable...bitwise filter
  
    class rewards( LeggedNavCfg.rewards ):
        class scales:
            # Reward term weights; names must match _reward_<name> methods in the env.
            # normal rewards
            tracking_goal_vel = 1.5
            tracking_yaw = 1
            forward = 1
            
            stay = 2
            
            # high penalties
            stall = -1.
            
            head_follow_vel = -0.5
            
            # base_collision = -10  # small
            # lin_vel_z = -1.0  # small
            ang_vel_xy = -0.05  
            
            # no_y_velocity = -0.005
            
            orientation = -1.5 # * 1.3
            dof_acc = -2.5e-7
            num_collision = -10. 
            # action_rate = -0.1 * 2.2
            action_rate = -0.01
            delta_torques = -1.0e-7 * 2.2  # small
            torques = -0.00001 * 2.2
            hip_pos = -0.5 
            feet_edge = -1 
            dof_error = -0.04 * 2 
            feet_stumble = -1.
            # termination = -10  # small

            # HIM reward
            joint_power = -2e-5
            smoothness = -0.01
            foot_clearance = -0.01  # small
            
        clearance_height_target = -0.20
        finetune_goals = 8
        dist_levels = 5
        goal_level_up = (0.05, 0.1)
        single_goal_time_limit_s = 30
        only_positive_rewards = True # if true negative total rewards are clipped at zero (avoids early termination problems)
        tracking_sigma = 0.2 # tracking reward = exp(-error^2/sigma)
        soft_dof_pos_limit = 1. # percentage of urdf limits, values above this limit are penalized
        soft_dof_vel_limit = 1
        soft_torque_limit = 0.4
        base_height_target = 1.
        max_contact_force = 40. # forces above this value are penalized
        termination_threshold = 1.
        reach_final_scale = 100.
        obs_yaw = True
        obs_goal = True
        
    class domain_rand( LeggedNavCfg.domain_rand ):
        # Domain randomization for sim-to-real robustness.
        randomize_friction = True
        friction_range = [0.2, 2]
        
        randomize_base_mass = True
        added_mass_range = [-1, 2]  # [kg] — presumably kilograms; verify against base config

        randomize_base_com = True
        added_com_range = [-0.05, 0.05]  # [m] — presumably meters; verify against base config

class AliengoNavSingleFTV2CrawlCfg( AliengoNavSingleFTV2Cfg ):
    """Crawl variant of AliengoNavSingleFTV2Cfg.

    Adds one proprio dimension, base-height reward targets, and swaps the
    obstacle menus ("forbidden" removed, "hurdle" promoted). Everything not
    overridden here is inherited from AliengoNavSingleFTV2Cfg.
    """

    class env( AliengoNavSingleFTV2Cfg.env ):
        n_scan = 132  # 11x12 height-scan points
        n_priv = 3 
        n_priv_latent = 4 + 1 + 12 + 12  # = 29 privileged latent dims
        n_proprio = 3 + 2 + 1 + 2 + 3 + 36 + 4 + 1 # = 52 (NOTE(review): original comment claimed 54) no time remain, add num goals (one hot), +1 vs parent
        # n_proprio = 3 + 2 + 3 + 36 + 4
        history_len = 10  # number of stacked past proprio frames (history_len * n_proprio values)
        num_observations = n_proprio + n_scan + history_len * n_proprio + n_priv_latent + n_priv # 52 + 132 + 10*52 + 29 + 3 = 736
        
        randomize_start_yaw = True
        reach_goal_delay = 2
        
    class rewards( AliengoNavSingleFTV2Cfg.rewards ):
        # Crawl-specific base-height targets [m] and a replaced scales table.
        crawl_target_base_height = 0.27
        stand_target_base_height = 0.38
        resampling_time = 2.
        class scales:
            # Fully overrides the parent's scales (no inheritance): names must
            # match _reward_<name> methods in the env.
            # normal rewards
            tracking_goal_vel = 1.5 * 2
            tracking_yaw = 1
            forward = 1
            
            stay = 5.
            
            stall = -1.
            base_collision = -1
            lin_vel_z = -1.0
            ang_vel_xy = -0.05  
            orientation = -1.5
            dof_acc = -2.5e-7
            num_collision = -10. 
            action_rate = -0.1 * 2.2
            delta_torques = -1.0e-7 * 2.2
            torques = -0.00001 * 2.2
            hip_pos = -0.5 
            feet_edge = -1 
            dof_error = -0.04 * 2 
            feet_stumble = -1.

            # HIM reward
            joint_power = -2e-5
            smoothness = -0.01
            foot_clearance = -0.01

            # base_target_penal = -2.0
            base_target = 3.5

    class terrain( AliengoNavSingleFTV2Cfg.terrain ):
        # Same tile layout as parent, but "forbidden" is dropped from both menus.
        nav_step_options = [
                # "forbidden",
                "hurdle",
                "flat",
                "gap",
                "jump_up",
                "jump_down",
                # "stairs_up",
                # "stairs_down",
        ]
        nav_stair_options = [
                # "forbidden",
                "hurdle",
                "flat",
                "gap",
                # "jump_up",
                # "jump_down",
                "stairs_up",
                "stairs_down",
        ]

class AliengoNavSingleFTV2CrawlCfgPPO( LeggedNavCfgPPO ):
    """PPO training config paired with AliengoNavSingleFTV2CrawlCfg."""

    # runner_class_name = 'OnPolicyRunner'
    class algorithm( LeggedNavCfgPPO.algorithm ):
        entropy_coef = 0.01
    class runner( LeggedNavCfgPPO.runner ):
        # policy_class_name = 'ActorCriticRMA'
        # algorithm_class_name = 'PPO'
        num_steps_per_env = 24 # per iteration
        run_name = ''
        experiment_name = 'aliengo'
        
    class estimator( LeggedNavCfgPPO.estimator ):
        # Estimator input width is tied to the env's proprio dimension so the
        # two configs cannot drift apart.
        num_prop = AliengoNavSingleFTV2CrawlCfg.env.n_proprio
