import os
import sys
import numpy as np
import torch
import time
import argparse
import xml.etree.ElementTree as ET

# Set environment variables for headless rendering
os.environ['MUJOCO_GL'] = 'osmesa'
os.environ['LIBGL_ALWAYS_SOFTWARE'] = '1'

import mujoco
import mujoco.viewer
import matplotlib.pyplot as plt

# Add the current directory to the path so we can import from hardware
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

# Constants from hardware_whole_body.py
HW_DOF = 20

def wrap_to_pi(angles):
    """Wrap angle(s) in radians to the interval (-pi, pi].

    Accepts a scalar or a numpy array. Unlike the previous version, the
    caller's array is NOT modified in place (the old `%=`/`-=` operators
    silently mutated numpy-array arguments).

    Args:
        angles: angle or array of angles, in radians.

    Returns:
        The wrapped angle(s), same shape as the input.
    """
    # Reduce to [0, 2*pi), then fold (pi, 2*pi) down to (-pi, 0).
    wrapped = np.asarray(angles) % (2 * np.pi)
    wrapped = wrapped - 2 * np.pi * (wrapped > np.pi)
    return wrapped

class H1():
    """
    Simplified version of the H1 class from hardware_whole_body.py
    Adapted for Mujoco simulation.

    Holds the per-task configuration (observation/action sizes), joint
    limits, default joint targets, PD gains, the observation-history buffer
    fed to the policy, and the (optionally loaded) TorchScript policy.
    """

    # Tasks this class knows how to configure.
    VALID_TASKS = ('stand', 'stand_w_waist', 'wb', 'squat')

    def __init__(self, task='stand'):
        """
        Args:
            task: one of VALID_TASKS; selects observation/action dimensions.

        Raises:
            ValueError: if `task` is not a recognized task name.
        """
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.task = task

        self.num_envs = 1
        if self.task == 'stand':
            self.num_observations = 62 + 11 + 5
            self.num_actions = 11
        elif self.task == 'stand_w_waist':
            self.num_observations = 62 + 11 + 5
            self.num_actions = 11
        elif self.task == 'wb' or self.task == 'squat':
            self.num_observations = 62 + 19 + 5
            self.num_actions = 19
        else:
            # Previously an unrecognized task silently left
            # num_observations / num_actions unset and failed much later
            # with an AttributeError; fail fast instead.
            raise ValueError(f"Unknown task '{task}'; expected one of {self.VALID_TASKS}")
        self.num_privileged_obs = None
        self.obs_context_len = 8

        # Scaling factors applied to the raw observation/action components.
        self.scale_lin_vel = 1.0
        self.scale_ang_vel = 1.0
        self.scale_orn = 1.0
        self.scale_dof_pos = 1.0
        self.scale_dof_vel = 1.0
        self.scale_action = 1.0

        # Joint limits and default positions (20 hardware DOFs).
        # NOTE: index 9 is pinned (lo == hi == 0).
        self.joint_limit_lo = [-0.43,-1.57,-0.26,-0.43,-1.57,-0.26,-2.35,-0.43,-0.43,0,-0.87,-0.87,-2.87,-3.11,-4.45,-1.25,-2.87,-0.34,-1.3,-1.25]
        self.joint_limit_hi = [0.43,1.57,2.05,0.43,1.57,2.05,2.35,0.43,0.43,0,0.52,0.52,2.87,0.34,1.3,2.61,2.87,3.11,4.45,2.61]
        self.default_dof_pos_np = np.array([0.0,-10/180*np.pi,20/180*np.pi,0.0,-10/180*np.pi,
                                            20/180*np.pi,0.0,0.42,-0.42,0.0,
                                            -10/180*np.pi, -10/180*np.pi,0.0,0.0,0.0,
                                            0.0,0.0,0.0,0.0,0.0])

        # Default pose as a [1, 20] torch tensor on the chosen device.
        default_dof_pos = torch.tensor(self.default_dof_pos_np, dtype=torch.float, device=self.device, requires_grad=False)
        self.default_dof_pos = default_dof_pos.unsqueeze(0)

        # PD gains for position control (one entry per DOF).
        self.p_gains = np.array([60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0])
        self.d_gains = np.array([10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])

        # Observation history buffer fed to the policy, shape
        # [1, obs_context_len, 84]. 84 comes from the policy checkpoint's
        # input layer (observed runtime error: "mat1 and mat2 shapes cannot
        # be multiplied (8x78 and 84x128)"), NOT from num_observations.
        expected_obs_size = 84
        self.obs_history_buf = torch.zeros((1, self.obs_context_len, expected_obs_size), device=self.device)

        # Load policy (sets self.policy, possibly to None).
        self.init_policy()

    def init_policy(self):
        """
        Load the TorchScript policy from ../hardware/ckpt/policy_1.pt
        (relative to this file).

        Sets self.policy to the loaded module, or to None when the file is
        missing or loading fails — callers must handle a None policy.
        """
        print("Preparing policy")
        try:
            file_pth = os.path.dirname(os.path.realpath(__file__))
            policy_path = os.path.join(file_pth, "../hardware/ckpt/policy_1.pt")
            if os.path.exists(policy_path):
                self.policy = torch.jit.load(policy_path, map_location=self.device)
                self.policy.to(self.device)
                print("Policy loaded successfully")
            else:
                print(f"Policy file not found at {policy_path}")
                self.policy = None
        except Exception as e:
            print(f"Error loading policy: {e}")
            self.policy = None

class MujocoH1Simulator:
    """
    Mujoco simulator for the H1 robot.

    Loads an MJCF scene (falling back to a hand-written simple humanoid
    model when the scene file is missing or fails to load), wraps an H1
    task configuration, and runs a stand-up ramp followed by a
    policy-driven control loop with an interactive viewer.
    """
    def __init__(self, task='stand'):
        # Path to the MJCF scene, relative to this file.
        # NOTE(review): "resouce" looks like a typo for "resource" -- confirm
        # the actual directory name on disk before changing this string.
        self.model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                      "../resouce/mujoco/models/scene.xml")

        # Initialize the H1 environment (task sizes, gains, policy)
        self.h1_env = H1(task=task)

        # Load the model into Mujoco (sets self.physics and, on the direct
        # load path only, self.data)
        self.init_mujoco_model()

        # Phase flags: start in the stand-up ramp with the policy disabled
        self.stand_up = True
        self.start_policy = False

    def init_mujoco_model(self):
        """
        Initialize the Mujoco model from the URDF file.

        Resolution order: self.model_path, then an alternative h1.xml next
        to the working directory, then mujoco's bundled humanoid.xml, and
        finally the hand-written simple humanoid MJCF string.
        """
        try:
            # Check if the URDF file exists
            if not os.path.exists(self.model_path):
                print(f"URDF file not found at {self.model_path}")
                # Try to find the file in a different location
                alt_path = os.path.join(os.getcwd(), "../resouce/mujoco/models/h1.xml")
                if os.path.exists(alt_path):
                    self.model_path = alt_path
                    print(f"Found URDF at alternative path: {alt_path}")
                else:
                    # If we can't find the URDF, create a simple humanoid model
                    print("Creating a simple humanoid model instead")
                    self.model_path = os.path.join(os.path.dirname(mujoco.__file__), "model", "humanoid.xml")
                    # NOTE(review): this early return only updates
                    # self.model_path -- humanoid.xml is never actually loaded,
                    # so self.physics / self.data stay unset and later code
                    # that touches them will raise AttributeError.
                    return

            # Load the model from the URDF file
            print(f"Loading model from {self.model_path}")

            # For Mujoco, we need to convert URDF to XML first
            # This is a simplified approach - in a real implementation, you might need to use a URDF to MJCF converter
            try:
                # Try to load directly as XML

                self.physics = mujoco.MjModel.from_xml_path(self.model_path)
                self.data = mujoco.MjData(self.physics)

                print("Created physics from URDF directly")

                print("Added environment lighting and ground plane")
            except Exception as e:
                print(f"Error loading URDF directly: {e}")
                print("Creating a simple humanoid model instead")
                self.create_simple_humanoid_model()

        except Exception as e:
            print(f"Error initializing Mujoco model: {e}")
            print("Creating a simple humanoid model instead")
            self.create_simple_humanoid_model()

    def show_view(self):
        """Launch a blocking interactive viewer for the loaded model."""
        # NOTE(review): a fresh MjData is created here instead of reusing
        # self.data, so the viewer starts from the model's default state,
        # not from the current simulation state -- confirm that is intended.
        data = mujoco.MjData(self.physics)
        mujoco.viewer.launch(self.physics, data)

    def create_simple_humanoid_model(self):
        """
        Create a simple humanoid model for simulation.

        Builds self.physics from an inline MJCF string (12 actuated hinge
        joints plus a free root joint).
        """
        try:
            # Create a simple XML string for a humanoid model
            xml_string = """
            <mujoco model="humanoid">
                <compiler angle="degree" coordinate="local" inertiafromgeom="true"/>
                <option integrator="RK4" timestep="0.002"/>
                <default>
                    <joint armature="1" damping="1" limited="true"/>
                    <geom conaffinity="1" condim="1" contype="1" margin="0.001" material="geom" rgba="0.8 0.6 0.4 1"/>
                    <motor ctrllimited="true" ctrlrange="-1 1"/>
                </default>
                <asset>
                    <texture builtin="flat" height="1278" mark="cross" markrgb="1 1 1" name="texgeom" random="0.01" rgb1="0.8 0.6 0.4" rgb2="0.8 0.6 0.4" type="cube" width="127"/>
                    <texture builtin="checker" height="100" name="texplane" rgb1="0 0 0" rgb2="0.8 0.8 0.8" type="2d" width="100"/>
                    <material name="MatPlane" reflectance="0.5" shininess="1" specular="1" texrepeat="60 60" texture="texplane"/>
                    <material name="geom" texture="texgeom" texuniform="true"/>
                </asset>
                <worldbody>
                    <light cutoff="100" diffuse="1 1 1" dir="-0 0 -1.3" directional="true" exponent="1" pos="0 0 1.3" specular=".1 .1 .1"/>
                    <geom conaffinity="1" condim="3" material="MatPlane" name="floor" pos="0 0 0" rgba="0.8 0.9 0.8 1" size="40 40 40" type="plane"/>
                    <body name="torso" pos="0 0 1.4">
                        <joint armature="0" damping="0" limited="false" name="root" pos="0 0 0" stiffness="0" type="free"/>
                        <geom name="torso1" pos="0 0 0" size="0.07 0.07 0.07" type="sphere"/>
                        <geom name="head" pos="0 0 0.19" size="0.09" type="sphere"/>
                        <geom name="uwaist" fromto="-.01 -.01 -.07 .01 .01 -.07" size="0.06" type="capsule"/>
                        <body name="lwaist" pos="0 0 -0.1">
                            <geom name="lwaist" fromto="-.01 -.01 -.05 .01 .01 -.05" size="0.06" type="capsule"/>
                            <body name="pelvis" pos="0 0 -0.165">
                                <geom name="butt" fromto="-.02 -.07 0 .02 .07 0" size="0.09" type="capsule"/>
                                <body name="right_thigh" pos="-0.1 0 -0.04">
                                    <joint axis="0 1 0" name="right_hip_y" pos="0 0 0" range="-60 70" type="hinge"/>
                                    <joint axis="1 0 0" name="right_hip_x" pos="0 0 0" range="-120 20" type="hinge"/>
                                    <geom fromto="0 0 0 0 0 -0.34" name="right_thigh1" size="0.06" type="capsule"/>
                                    <body name="right_shin" pos="0 0 -0.403">
                                        <joint axis="0 -1 0" name="right_knee" pos="0 0 .02" range="-160 -2" type="hinge"/>
                                        <geom fromto="0 0 0 0 0 -0.3" name="right_shin1" size="0.049" type="capsule"/>
                                        <body name="right_foot" pos="0 0 -0.35">
                                            <geom name="right_foot" pos="0 0 0.06" size="0.075 0.15 0.03" type="box"/>
                                        </body>
                                    </body>
                                </body>
                                <body name="left_thigh" pos="0.1 0 -0.04">
                                    <joint axis="0 1 0" name="left_hip_y" pos="0 0 0" range="-60 70" type="hinge"/>
                                    <joint axis="1 0 0" name="left_hip_x" pos="0 0 0" range="-120 20" type="hinge"/>
                                    <geom fromto="0 0 0 0 0 -0.34" name="left_thigh1" size="0.06" type="capsule"/>
                                    <body name="left_shin" pos="0 0 -0.403">
                                        <joint axis="0 -1 0" name="left_knee" pos="0 0 .02" range="-160 -2" type="hinge"/>
                                        <geom fromto="0 0 0 0 0 -0.3" name="left_shin1" size="0.049" type="capsule"/>
                                        <body name="left_foot" pos="0 0 -0.35">
                                            <geom name="left_foot" pos="0 0 0.06" size="0.075 0.15 0.03" type="box"/>
                                        </body>
                                    </body>
                                </body>
                            </body>
                        </body>
                        <body name="right_upper_arm" pos="0 -0.17 0.06">
                            <joint axis="2 1 1" name="right_shoulder1" pos="0 0 0" range="-85 60" type="hinge"/>
                            <joint axis="0 -1 1" name="right_shoulder2" pos="0 0 0" range="-85 60" type="hinge"/>
                            <geom fromto="0 0 0 .16 -.16 -.16" name="right_uarm1" size="0.04 0.16" type="capsule"/>
                            <body name="right_lower_arm" pos=".18 -.18 -.18">
                                <joint axis="0 -1 -1" name="right_elbow" pos="0 0 0" range="-90 50" type="hinge"/>
                                <geom fromto="0.01 0.01 0.01 .17 .17 .17" name="right_larm" size="0.031" type="capsule"/>
                                <geom name="right_hand" pos=".18 .18 .18" size="0.04" type="sphere"/>
                            </body>
                        </body>
                        <body name="left_upper_arm" pos="0 0.17 0.06">
                            <joint axis="2 -1 1" name="left_shoulder1" pos="0 0 0" range="-60 85" type="hinge"/>
                            <joint axis="0 1 1" name="left_shoulder2" pos="0 0 0" range="-60 85" type="hinge"/>
                            <geom fromto="0 0 0 .16 .16 -.16" name="left_uarm1" size="0.04 0.16" type="capsule"/>
                            <body name="left_lower_arm" pos=".18 .18 -.18">
                                <joint axis="0 -1 -1" name="left_elbow" pos="0 0 0" range="-90 50" type="hinge"/>
                                <geom fromto="0.01 -0.01 0.01 .17 -.17 .17" name="left_larm" size="0.031" type="capsule"/>
                                <geom name="left_hand" pos=".18 -.18 .18" size="0.04" type="sphere"/>
                            </body>
                        </body>
                    </body>
                </worldbody>
                <actuator>
                    <motor gear="100" joint="right_hip_y" name="right_hip_y"/>
                    <motor gear="100" joint="right_hip_x" name="right_hip_x"/>
                    <motor gear="100" joint="right_knee" name="right_knee"/>
                    <motor gear="100" joint="left_hip_y" name="left_hip_y"/>
                    <motor gear="100" joint="left_hip_x" name="left_hip_x"/>
                    <motor gear="100" joint="left_knee" name="left_knee"/>
                    <motor gear="100" joint="right_shoulder1" name="right_shoulder1"/>
                    <motor gear="100" joint="right_shoulder2" name="right_shoulder2"/>
                    <motor gear="100" joint="right_elbow" name="right_elbow"/>
                    <motor gear="100" joint="left_shoulder1" name="left_shoulder1"/>
                    <motor gear="100" joint="left_shoulder2" name="left_shoulder2"/>
                    <motor gear="100" joint="left_elbow" name="left_elbow"/>
                </actuator>
            </mujoco>
            """

            # Create physics from the XML string
            # Use dm_control.mujoco.Physics instead of mujoco.Physics
            # from dm_control import mujoco as dm_mujoco
            self.physics = mujoco.MjModel.from_xml_string(xml_string)
            # NOTE(review): unlike the direct-load path in init_mujoco_model,
            # this branch never creates self.data = mujoco.MjData(self.physics),
            # so later uses of self.data will raise AttributeError.
            print("Created simple humanoid model")

        except Exception as e:
            print(f"Error creating simple humanoid model: {e}")
            raise

    def set_gains(self, kp, kd):
        """
        Set the PD gains for the joints.

        Args:
            kp: array of proportional gains, one per DOF.
            kd: array of derivative gains, one per DOF.
        """
        # Store the gains for later use
        self.kp = kp
        self.kd = kd

        # In a real implementation, we would set these in the Mujoco model
        # For now, we'll just store them for use in set_motor_position

    def set_motor_position(self, q):
        """
        Set the target position for the motors using PD control.

        Computes torque-like control signals u = kp*(q_target - q_current)
        - kd*q_vel and writes them into self.data.ctrl.

        Args:
            q: array of target joint positions.
        """
        # try:
        # Store the target position
        self.target_pos = q

        # Get the current joint positions and velocities
        current_pos = self.data.qpos.copy()
        current_vel = self.data.qvel.copy()

        # Calculate the control signal using PD control
        # control = kp * (target - current) - kd * velocity

        # For simplicity, we'll just apply the target positions directly to the actuators
        # In a real implementation, we would compute proper control signals

        # Get the number of actuators in the model
        # NOTE(review): the nq value read here is immediately discarded by the
        # hard-coded 19 on the next line (and nq counts position coordinates,
        # not actuators -- the actuator count is model.nu). If the loaded
        # model has fewer than 19 actuators, the slice assignment into
        # data.ctrl below will fail on a length mismatch.
        num_actuators = self.physics.nq
        num_actuators = 19

        # Create control signal array
        ctrl = np.zeros(num_actuators)

        # Set control signals for the actuators we want to control
        # This is a simplified approach - in a real implementation, we would map the joints properly
        # NOTE(review): indexing current_pos/current_vel with the same i as q
        # assumes qpos aligns 1:1 with the joint ordering of q; with a free
        # root joint, qpos starts with 7 base coordinates, so this mapping is
        # presumably off by the base offset -- verify against the model.
        for i in range(min(len(q), num_actuators)):
            # Apply PD control: u = kp * (q_target - q_current) - kd * q_vel
            if i < len(self.kp) and i < len(self.kd) and i < len(current_pos) and i < len(current_vel):
                ctrl[i] = self.kp[i] * (q[i] - current_pos[i]) - self.kd[i] * current_vel[i]

        # Apply the control signals to the actuators
        self.data.ctrl[:] = ctrl

        # except Exception as e:
        #     print(f"Error in set_motor_position: {e}")

    def get_retarget(self, policy_actions=None):
        """
        Get the retargeted joint positions based on policy actions.

        Args:
            policy_actions: Tensor of shape [1, 19] containing the policy output
                           If None, returns default positions

        Returns:
            target_positions: Array of joint target positions
            pose_info: Additional pose information (placeholder)
        """
        if policy_actions is None:
            # If no policy actions provided, return default positions
            return self.h1_env.default_dof_pos_np, np.array([0]*5)

        # Convert policy actions from tensor to numpy array
        actions_np = policy_actions.cpu().numpy().flatten()

        # Create a copy of the default positions to modify
        target_positions = self.h1_env.default_dof_pos_np.copy()

        # Map the policy actions to joint positions based on the task
        if self.h1_env.task == 'stand':
            # For stand task, we control 11 joints (legs + waist)
            # Map the first 11 actions to the corresponding joints
            leg_waist_indices = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]  # Indices for legs and waist joints
            for i, idx in enumerate(leg_waist_indices):
                if i < len(actions_np):
                    # Scale the action by the action scale factor
                    scaled_action = actions_np[i] * self.h1_env.scale_action
                    # Apply the action to the default position within joint limits
                    target_positions[idx] = np.clip(
                        target_positions[idx] + scaled_action,
                        self.h1_env.joint_limit_lo[idx],
                        self.h1_env.joint_limit_hi[idx]
                    )

        # NOTE(review): 'stand_w_waist' is configured with num_actions == 11
        # in H1.__init__, yet here it is routed through the 19-joint mapping
        # -- verify which branch that task should take.
        elif self.h1_env.task == 'stand_w_waist' or self.h1_env.task == 'wb' or self.h1_env.task == 'squat':
            # For whole body tasks, we control all 19 joints (excluding the waist yaw joint at index 9)
            # Map all 19 actions to the corresponding joints
            active_indices = list(range(9)) + list(range(10, 20))  # All joints except waist yaw (index 9)
            for i, idx in enumerate(active_indices):
                if i < len(actions_np):
                    # Scale the action by the action scale factor
                    scaled_action = actions_np[i] * self.h1_env.scale_action
                    # Apply the action to the default position within joint limits
                    target_positions[idx] = np.clip(
                        target_positions[idx] + scaled_action,
                        self.h1_env.joint_limit_lo[idx],
                        self.h1_env.joint_limit_hi[idx]
                    )

        # Create a placeholder for pose information (in a real implementation, this would be meaningful)
        pose_info = np.array([0] * 5)

        return target_positions, pose_info

    def main_loop(self):
        """
        Main simulation loop.

        Phase 1 ("stand up") ramps the PD gains from 0 to full over ~1000
        steps while holding the default pose. Phase 2 opens a viewer and is
        intended to run the policy in the loop.
        """
        import mujoco
        print("Starting main simulation loop")

        # Stand up phase
        standup_id = 0
        while self.stand_up and not self.start_policy:
            if standup_id == 999:
                print("---Initialized---")
            # Gain ramp: 0 -> 1 linearly over the first 1000 steps
            time_ratio = min(1, standup_id / 1000)
            self.set_gains(kp=time_ratio * self.h1_env.p_gains, kd=time_ratio * self.h1_env.d_gains)
            self.set_motor_position(q=self.h1_env.default_dof_pos_np)

            # Step the simulation
            # self.physics.step()

            mujoco.mj_step(self.physics, self.data)


            # If we have a policy, run it
            if self.h1_env.policy is not None:
                with torch.no_grad():
                    # Run the policy but don't use the actions yet policy shape:[1, 19]
                    ret = self.h1_env.policy(self.h1_env.obs_history_buf.detach())
                    # print(f"Policy output: {ret.shape} {ret}")

            standup_id += 1


            # If we've reached the end of the stand up phase, start the policy
            if standup_id >= 1000:
                self.stand_up = False
                self.start_policy = True

        # Main control loop
        cnt = 0
        fps_ckt = time.monotonic()
        target_jt_hw = self.h1_env.default_dof_pos_np.copy()  # Make a copy to avoid modifying the original

        # Try to launch the viewer
        try:
            # First try to use mujoco.viewer which is more robust
            use_mujoco_viewer = True
            try:
                import mujoco.viewer
                print("Using mujoco.viewer for rendering")
            except ImportError:
                use_mujoco_viewer = False
                print("mujoco.viewer not available, falling back to dm_control.viewer")

            if use_mujoco_viewer:
                # Use mujoco.viewer which is more robust
                # Instead of using launch directly, we'll use the callback mechanism
                # to update the simulation based on policy output

                # Define a callback function to apply policy actions during simulation
                def policy_callback(_model, data):  # model parameter is required but not used
                    nonlocal cnt, fps_ckt, target_jt_hw

                    try:
                        # Update counter and timing
                        cnt += 1
                        if cnt % 100 == 0:
                            fps = 100 / (time.monotonic() - fps_ckt)
                            fps_ckt = time.monotonic()
                            print(f"FPS: {fps:.2f}")

                        # Apply policy if available and enabled
                        if self.h1_env.policy is not None and self.start_policy:
                            # Create observation from current state
                            # Get expected observation size from the policy
                            expected_obs_size = self.h1_env.obs_history_buf.shape[-1]

                            # Create observation with meaningful data
                            obs_data = np.zeros(expected_obs_size)

                            # Fill observation with robot state data
                            # 1. Base velocity (linear and angular)
                            if len(data.qvel) >= 6:
                                base_lin_vel = data.qvel[:3].copy()
                                base_ang_vel = data.qvel[3:6].copy()

                                # Scale velocities
                                base_lin_vel = base_lin_vel * self.h1_env.scale_lin_vel
                                base_ang_vel = base_ang_vel * self.h1_env.scale_ang_vel

                                # Add to observation
                                obs_data[:6] = np.concatenate([base_lin_vel, base_ang_vel])

                            # 2. Joint positions and velocities
                            if len(data.qpos) >= HW_DOF and len(data.qvel) >= HW_DOF:
                                # Get DOF positions and velocities
                                dof_pos = data.qpos[-HW_DOF:].copy()
                                dof_vel = data.qvel[-HW_DOF:].copy()

                                # Scale DOF positions and velocities
                                dof_pos = dof_pos * self.h1_env.scale_dof_pos
                                dof_vel = dof_vel * self.h1_env.scale_dof_vel

                                # Add to observation
                                obs_data[6:6+HW_DOF] = dof_pos
                                obs_data[6+HW_DOF:6+2*HW_DOF] = dof_vel

                            # 3. Commands
                            command_idx = 6 + 2*HW_DOF
                            obs_data[command_idx:command_idx+3] = np.array([0.5, 0.0, 0.0])  # Example command

                            # Convert to torch tensor
                            obs_buf = torch.tensor(obs_data, dtype=torch.float, device=self.h1_env.device).unsqueeze(0)

                            # Update observation history buffer (drop oldest frame)
                            self.h1_env.obs_history_buf = torch.cat([
                                self.h1_env.obs_history_buf[:, 1:],
                                obs_buf.unsqueeze(1)
                            ], dim=1)

                            # Run policy inference
                            with torch.no_grad():
                                policy_actions = self.h1_env.policy(self.h1_env.obs_history_buf.detach())

                                # Get retargeted joint positions based on policy actions
                                if self.h1_env.task == "stand":
                                    target_jt_hw, _ = self.get_retarget(policy_actions)
                                elif self.h1_env.task == "stand_w_waist" or self.h1_env.task == 'wb' or self.h1_env.task == 'squat':
                                    target_jt_hw, _ = self.get_retarget(policy_actions)

                                # Print action norm occasionally for debugging
                                if cnt % 100 == 0:
                                    print(f"Action norm: {torch.norm(policy_actions).item():.4f}")

                        # Apply the target positions to the simulation
                        if target_jt_hw is not None:
                            # Set the target positions in the simulation
                            for i in range(min(len(target_jt_hw), len(data.ctrl))):
                                data.ctrl[i] = target_jt_hw[i]

                    except Exception as e:
                        print(f"Error in policy callback: {e}")

                # Launch the viewer with our callback
                # mujoco.viewer.launch(self.physics, self.data, policy_callback)
                # NOTE(review): the launch call that would register
                # policy_callback is commented out, so the callback above is
                # currently dead code and the viewer runs without policy
                # control.
                mujoco.viewer.launch(self.physics, self.data)
                return  # mujoco.viewer.launch is blocking, so we return after it's done
            else:
                # Fall back to dm_control.viewer
                from dm_control import viewer

                # Create a simple application to view the simulation
                viewer_app = viewer.launch(self.physics)

                # Main control loop with viewer
                while viewer_app._is_running:
                    loop_start_time = time.monotonic()

                    # We'll handle policy-based control in the policy section above
                    # This block is now redundant since we're applying policy actions directly
                    # We'll keep a fallback mechanism for when policy is not available
                    if self.start_policy and self.h1_env.policy is None:
                        # Fallback control when policy is not available
                        if cnt % 2 == 0:
                            if self.h1_env.task == "stand":
                                target_jt_hw = self.get_retarget()[0]  # Get first return value only
                            elif self.h1_env.task == "stand_w_waist" or self.h1_env.task == 'wb' or self.h1_env.task == 'squat':
                                target_jt_hw, _ = self.get_retarget()  # Ignore the pose for now

                            # Set the target positions
                            self.set_motor_position(q=target_jt_hw)

                    # Update observation buffer with the current state
                    # In a real implementation, this would be based on the robot state from sensors
                    # For now, we'll create a simple observation based on the physics state
                    try:
                        # Get joint positions and velocities (for reference, not used directly)
                        # These variables are kept for debugging purposes
                        # NOTE(review): self.physics is a mujoco.MjModel, which
                        # has no .data attribute (that belongs to dm_control's
                        # Physics wrapper) -- this whole try block presumably
                        # raises and lands in the except below; verify.
                        _qpos = self.physics.data.qpos.copy()
                        _qvel = self.physics.data.qvel.copy()

                        # Create a simple observation (this is a placeholder)
                        # Make sure the observation has the correct shape
                        # From the error message, we need to match the input shape expected by the policy
                        # The error was: mat1 and mat2 shapes cannot be multiplied (8x78 and 84x128)
                        # This suggests the policy expects 84 features, but we're providing 78

                        # First, let's check the expected observation size
                        expected_obs_size = 84  # Based on the error message
                        actual_obs_size = self.h1_env.num_observations

                        # Print the sizes for debugging
                        if cnt % 100 == 0:
                            print(f"Expected obs size: {expected_obs_size}, Actual obs size: {actual_obs_size}")

                        # Create observation with the correct size and meaningful data
                        obs_data = np.zeros(expected_obs_size)

                        # Fill in the observation with actual robot state data
                        # This is a simplified version - in a real implementation, you would use sensor data

                        # 1. Base velocity and orientation (first part of observation)
                        # Get base velocity (linear and angular)
                        if hasattr(self.physics.data, 'qvel') and len(self.physics.data.qvel) >= 6:
                            base_lin_vel = self.physics.data.qvel[:3].copy()  # Linear velocity
                            base_ang_vel = self.physics.data.qvel[3:6].copy()  # Angular velocity

                            # Scale velocities
                            base_lin_vel = base_lin_vel * self.h1_env.scale_lin_vel
                            base_ang_vel = base_ang_vel * self.h1_env.scale_ang_vel

                            # Add to observation
                            obs_data[:6] = np.concatenate([base_lin_vel, base_ang_vel])

                        # 2. Joint positions and velocities (middle part of observation)
                        # Get joint positions and velocities for the DOFs we control
                        if hasattr(self.physics.data, 'qpos') and hasattr(self.physics.data, 'qvel'):
                            # Get DOF positions and velocities (simplified - in real implementation map correctly)
                            dof_pos = self.physics.data.qpos[-HW_DOF:].copy() if len(self.physics.data.qpos) >= HW_DOF else np.zeros(HW_DOF)
                            dof_vel = self.physics.data.qvel[-HW_DOF:].copy() if len(self.physics.data.qvel) >= HW_DOF else np.zeros(HW_DOF)

                            # Scale DOF positions and velocities
                            dof_pos = dof_pos * self.h1_env.scale_dof_pos
                            dof_vel = dof_vel * self.h1_env.scale_dof_vel

                            # Add to observation (starting at index 6)
                            obs_data[6:6+HW_DOF] = dof_pos
                            obs_data[6+HW_DOF:6+2*HW_DOF] = dof_vel

                        # 3. Commands (last part of observation)
                        # Add command signals (e.g., desired velocity) - in a real implementation, these would come from user input
                        command_idx = 6 + 2*HW_DOF
                        obs_data[command_idx:command_idx+3] = np.array([0.5, 0.0, 0.0])  # Example: move forward at 0.5 m/s

                        # Convert to torch tensor
                        obs_buf = torch.tensor(obs_data, dtype=torch.float, device=self.h1_env.device).unsqueeze(0)

                        # Update the observation history buffer
                        self.h1_env.obs_history_buf = torch.cat([
                            self.h1_env.obs_history_buf[:, 1:],
                            obs_buf.unsqueeze(1)
                        ], dim=1)

                        # Run the policy if available
                        if self.h1_env.policy is not None:
                            with torch.no_grad():
                                # Get actions from policy
                                policy_actions = self.h1_env.policy(self.h1_env.obs_history_buf.detach())

                                # Apply the actions to the simulation
                                if self.start_policy:
                                    # Get retargeted joint positions based on policy actions
                                    if self.h1_env.task == "stand":
                                        target_jt_hw, _ = self.get_retarget(policy_actions)
                                    elif self.h1_env.task == "stand_w_waist" or self.h1_env.task == 'wb' or self.h1_env.task == 'squat':
                                        target_jt_hw, _ = self.get_retarget(policy_actions)

                                    # Set the target positions immediately
                                    self.set_motor_position(q=target_jt_hw)

                                # Print action norm occasionally for debugging
                                if cnt % 100 == 0:
                                    print(f"Action norm: {torch.norm(policy_actions).item():.4f}")
                    except Exception as e:
                        print(f"Error in observation/policy step: {e}")

                # NOTE(review): everything from here to the sleep below is
                # indented at the *while* level, so it executes only once,
                # after the loop exits -- the physics is never stepped and cnt
                # never advances inside the loop above. Also, MjModel has no
                # step() method. These lines were presumably meant to sit
                # inside the while body -- verify before relying on this path.
                # Step the simulation
                self.physics.step()

                # Update the viewer
                viewer_app.step()

                # Calculate FPS
                if cnt % 100 == 0:
                    fps = 100 / (time.monotonic() - fps_ckt)
                    fps_ckt = time.monotonic()
                    print(f"FPS: {fps:.2f}")

                cnt += 1

                # Sleep to maintain a reasonable frame rate
                time_to_sleep = max(0, 0.01 - (time.monotonic() - loop_start_time))
                time.sleep(time_to_sleep)
        except Exception as e:
            print(f"Error launching viewer: {e}")
            print("Trying to run with headless renderer...")

            try:
                # Try to use the headless renderer
                from dm_control import viewer
                from dm_control.viewer import renderer

                # Create a headless renderer
                _headless_renderer = renderer.NullRenderer()  # Variable not used but kept for clarity

                # Run with headless renderer
                # NOTE(review): self.physics is an MjModel here too, so
                # self.physics.step() below would raise -- confirm this path.
                print("Running with headless renderer...")
                for i in range(5000):  # Run for more steps since we can't see it
                    # Step the simulation
                    self.physics.step()

                    # Print progress occasionally
                    if i % 500 == 0:
                        print(f"Simulation step {i}/5000")

                    # Sleep to maintain a reasonable frame rate
                    time.sleep(0.01)
            except Exception as e2:
                print(f"Error with headless renderer: {e2}")
                print("Running without any renderer...")

                # Run without any renderer as a last resort
                # for i in range(1000):  # Run for 1000 steps
                #     # Step the simulation
                #     self.physics.step()

                #     # Print progress occasionally
                #     if i % 100 == 0:
                #         print(f"Simulation step {i}/1000")

                #     # Sleep to maintain a reasonable frame rate
                #     time.sleep(0.01)

def main():
    """Entry point: parse the command line and run the H1 simulator."""
    arg_parser = argparse.ArgumentParser(description="Mujoco H1 Simulator")
    arg_parser.add_argument(
        "--task",
        type=str,
        default="stand",
        help="Task to perform (stand, stand_w_waist, wb, squat)",
    )
    cli_args = arg_parser.parse_args()

    # Build the simulator for the requested task and enter its loop.
    sim = MujocoH1Simulator(task=cli_args.task)
    sim.main_loop()
    # sim.show_view()

if __name__ == "__main__":
    main()
