# SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# 
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Copyright (c) 2021 ETH Zurich, Nikita Rudin

from legged_gym.envs.base.legged_robot_config import LeggedRobotCfg, LeggedRobotCfgPPO


class WP_A1ParkourCfg( LeggedRobotCfg ):
    """Environment configuration for the Unitree A1 waypoint-parkour task.

    Overrides the base ``LeggedRobotCfg`` with the parkour observation
    layout, procedurally generated parkour terrain, and reward shaping.
    """

    class env( LeggedRobotCfg.env ):
        num_envs = 6144

        # Observation component sizes (dims).
        n_scan = 132            # terrain height scan: 11 x 12 sample grid
        n_priv = 3              # explicit privileged state
        n_priv_latent = 4 + 1 + 12 + 12  # 29 privileged latent dims
        n_proprio = 3 + 2 + 2 + 4 + 3 + 36 + 4 + 2 + 1 - 8  # 49 proprioceptive dims
        history_len = 10        # number of stacked past proprio frames
        wp_info_dims = 8        # waypoint information vector size

        # 49 + 132 + 10*49 + 29 + 3 = 703
        num_observations = n_proprio + n_scan + history_len * n_proprio + n_priv_latent + n_priv
        # If not None, a privileged_obs_buf is returned by step()
        # (critic observations for asymmetric training).
        num_privileged_obs = None
        episode_length_s = 20   # episode length in seconds

    class init_state( LeggedRobotCfg.init_state ):
        pos = [0.0, 0.0, 0.42]  # base spawn position x, y, z [m]
        # Target joint angles [rad] reached when the policy action is 0.0.
        default_joint_angles = {
            'FL_hip_joint': 0.1,
            'RL_hip_joint': 0.1,
            'FR_hip_joint': -0.1,
            'RR_hip_joint': -0.1,

            'FL_thigh_joint': 0.8,
            'RL_thigh_joint': 1.,
            'FR_thigh_joint': 0.8,
            'RR_thigh_joint': 1.,

            'FL_calf_joint': -1.5,
            'RL_calf_joint': -1.5,
            'FR_calf_joint': -1.5,
            'RR_calf_joint': -1.5,
        }

    class control( LeggedRobotCfg.control ):
        # PD drive parameters.
        control_type = 'P'
        stiffness = {'joint': 40.}  # [N*m/rad]
        damping = {'joint': 1}      # [N*m*s/rad]
        action_scale = 0.25         # target angle = action_scale * action + default angle
        decimation = 4              # control steps per policy step

    class asset( LeggedRobotCfg.asset ):
        file = '{LEGGED_GYM_ROOT_DIR}/resources/robots/a1/urdf/a1.urdf'
        foot_name = "foot"
        penalize_contacts_on = ["thigh", "calf", "base"]
        terminate_after_contacts_on = ["base"]
        self_collisions = 1  # 1 to disable, 0 to enable... bitwise filter

    class terrain( LeggedRobotCfg.terrain ):
        mesh_type = 'trimesh'       # none, plane, heightfield or trimesh
        hf2mesh_method = "grid"     # grid or fast
        max_error = 0.1             # for the "fast" method
        max_error_camera = 2
        waypoint_info_dim = 0
        y_range = [-0.4, 0.4]
        x_range = [1.5, 2.4]

        edge_width_thresh = 0.05
        horizontal_scale = 0.05     # [m] strongly influences computation time
        horizontal_scale_camera = 0.1
        vertical_scale = 0.005      # [m]
        border_size = 0             # [m]
        height = [0.02, 0.06]
        simplify_grid = False
        gap_size = [0.02, 0.1]
        stepping_stone_distance = [0.02, 0.08]
        downsampled_scale = 0.075
        curriculum = True
        diy = False

        all_vertical = False
        no_flat = True

        static_friction = 1.0
        dynamic_friction = 1.0
        restitution = 0.
        measure_heights = True
        # 1m x 1.6m sample rectangle (without center line): 12 x 11 points.
        measured_points_x = [-0.45, -0.3, -0.15, 0, 0.15, 0.3, 0.45, 0.6, 0.75, 0.9, 1.05, 1.2]
        measured_points_y = [-0.75, -0.6, -0.45, -0.3, -0.15, 0., 0.15, 0.3, 0.45, 0.6, 0.75]
        measure_horizontal_noise = 0.0

        selected = False            # select a unique terrain type and pass all arguments
        terrain_kwargs = None       # dict of arguments for the selected terrain
        max_init_terrain_level = 5  # starting curriculum state

        terrain_length = 18
        terrain_width = 4
        num_rows = 10  # number of terrain rows (curriculum levels)
        num_cols = 40  # number of terrain cols (types)

        # Relative sampling weights per terrain type (normalized downstream).
        terrain_dict = {"smooth slope": 0.,
                        "rough slope up": 0.,
                        "rough slope down": 0.0,
                        "rough stairs up": 0.,
                        "rough stairs down": 0.,
                        "discrete": 0.,
                        "stepping stones": 0.0,
                        "gaps": 0.,
                        "smooth flat": 0,
                        "pit": 0.0,
                        "wall": 0.0,
                        "platform": 0.,
                        "large stairs up": 0.,
                        "large stairs down": 0.,
                        "parkour": 0.1,
                        "parkour_hurdle": 0.1,
                        "parkour_flat": 0.,
                        "parkour_step": 0.1,
                        "parkour_gap": 0.1,
                        "demo": 0.0,
                        "my_side_slope_obs": 0.,
                        "my_hurdle_obs": 0.,
                        "my_platform_obs": 0.,
                        "my_gap_obs": 0.,
                        "my_flat_obs": 0.1,
                        "my_slop_obs": 0.1,}
        # Obstacle-course geometry ranges [m].
        platform_width = [3.5, 3]
        flat_y_range = [-1.2, 1.2]
        flat_x_range = [1.2, 2.2]
        hurdle_x_range = [1.2, 2.2]
        side_slop_len = [0.9, 1.]
        step_len = [0.3, 1.5]
        gap_x_range = [0.8, 1.5]
        terrain_proportions = list(terrain_dict.values())

        # Trimesh only:
        slope_treshold = 1.5  # slopes above this threshold are corrected to vertical surfaces
        origin_zero_z = True
        num_goals = 8

    class rewards( LeggedRobotCfg.rewards ):
        # NOTE(review): in the original file this class and the scalar
        # parameters below were mis-indented one level too deep, nesting
        # them inside ``terrain``. The framework reads ``cfg.rewards``,
        # so every value here was silently ignored in favor of base-class
        # defaults. Moved to class scope where the framework can see them.
        class scales:
            # Task rewards (positive weights).
            tracking_goal_vel = 1.5 * 1.5
            tracking_yaw = 0.5 * 1.5
            forward = 0.5 * 2

            # Regularization penalties (negative weights).
            base_collision = -0.5
            lin_vel_z = -1.0
            ang_vel_xy = -0.05
            orientation = -1.
            dof_acc = -2.5e-7
            num_collision = -10.
            action_rate = -0.1
            delta_torques = -1.0e-7
            torques = -0.00001
            hip_pos = -0.5
            dof_error = -0.04 * 0.75
            feet_stumble = -1
            feet_edge = -1

        only_positive_rewards = True  # clip negative total rewards at zero (avoids early-termination problems)
        tracking_sigma = 0.2          # tracking reward = exp(-error^2 / sigma)
        soft_dof_pos_limit = 1.       # fraction of URDF limits; values above this are penalized
        soft_dof_vel_limit = 1
        soft_torque_limit = 0.4
        base_height_target = 1.
        max_contact_force = 40.       # contact forces above this value are penalized
        termination_threshold = 1.
        reach_final_scale = 100.
        obs_yaw = True
        obs_goal = True

class WP_A1ParkourCfgPPO( LeggedRobotCfgPPO ):
    """PPO training configuration for the A1 waypoint-parkour task."""

    class policy( LeggedRobotCfgPPO.policy ):
        # Hidden-layer widths of the waypoint-encoder MLP.
        waypoint_encoder_dims = [128, 64, 32, 16]

    class algorithm( LeggedRobotCfgPPO.algorithm ):
        # Entropy bonus weight in the PPO objective.
        entropy_coef = 0.01

    class runner( LeggedRobotCfgPPO.runner ):
        # Actor-critic variant that consumes waypoint observations.
        policy_class_name = 'ActorCriticWP'
        experiment_name = 'rough_a1'
        run_name = ''

  
