import gymnasium as gym
import mani_skill2.envs
from sapien.core import Pose
import numpy as np

from stable_baselines3 import PPO
from stable_baselines3.common.env_util import make_vec_env

from mani_skill2.cocube_udp import CoCube
import time
import threading

# Latest robot pose in simulator coordinates, shared with the background
# CoCube thread: the thread mutates cocube_pos in place (numpy array) and
# rebinds cocube_rot.  NOTE(review): access is unsynchronized — the main loop
# may read a half-updated (x, y) pair; confirm this is acceptable jitter.
cocube_pos = np.array([0.0,0.0])
cocube_rot = 0.0
# Connection to the physical CoCube robot (id 1) over UDP.
# NOTE(review): assumes the robot gateway is reachable at 192.168.3.1.
agent = CoCube(1, gateway="192.168.3.1", local_ip="0.0.0.0")
# Gripper command fed into the sim action vector; -1 = open, 1 = close —
# presumably; confirm against the env's action convention.
gripper_action = -1

# Calibration offset (meters) between the CoCube arena origin and the
# simulator frame.  NOTE(review): measured constants — confirm against the
# physical arena setup.
_SIM_OFFSET_X = 0.25156
_SIM_OFFSET_Y = 0.51177


def _update_pose():
    """Publish the robot's latest pose into the globals read by the sim loop.

    Mutates ``cocube_pos`` in place (it is a numpy array shared with the
    main thread) and rebinds ``cocube_rot``.
    """
    global cocube_rot
    cocube_pos[0] = agent.pos_m[0] - _SIM_OFFSET_X
    cocube_pos[1] = agent.pos_m[1] - _SIM_OFFSET_Y
    cocube_rot = agent.yaw


def _wait_until_finished(uuid):
    """Poll every 10 ms until command `uuid` reports finished.

    Refreshes the shared pose on every poll so the simulator keeps tracking
    the robot while the command executes.
    """
    while True:
        time.sleep(0.01)
        flag, result = agent.judge_whether_finished(uuid)
        _update_pose()
        if flag:
            print(result)
            print("successfully")
            return


def cocube_thread():
    """Drive the physical CoCube through a pick-carry-release routine.

    Runs on a background thread.  Continuously publishes the robot's pose via
    the module-level ``cocube_pos``/``cocube_rot`` globals and toggles
    ``gripper_action`` so the simulated gripper mirrors the physical one.
    Never returns: after the final release it streams the pose forever.
    """
    global gripper_action

    _update_pose()
    time.sleep(7)  # give the robot/odometry time to settle before moving

    # 1. Drive to the pick location (arena coordinates).
    uuid = agent.move_to_target(target_x=178, target_y=68)
    _wait_until_finished(uuid)

    # 2. Close the gripper on the object.
    gripper_action = 1
    uuid = agent.gripper_degree(degree=-70)
    _wait_until_finished(uuid)
    time.sleep(0.3)

    # 3. Carry it to the drop location.
    uuid = agent.move_to_target(target_x=69, target_y=88)
    _wait_until_finished(uuid)

    # 4. Release, then keep streaming the pose indefinitely (the release
    #    command is intentionally not awaited, matching prior behavior).
    uuid = agent.gripper_degree(degree=-30)
    gripper_action = -1
    while True:
        time.sleep(0.01)
        _update_pose()

# Run the hardware control loop on a background *daemon* thread:
# cocube_thread() never returns, so a non-daemon thread would keep the
# process alive forever and prevent a clean exit on Ctrl-C.
thread = threading.Thread(target=cocube_thread, daemon=True)
thread.start()

# --- Simulation / policy setup ---------------------------------------------
env_id = "CocubeGripper-v0"
model1_path = 'train/logs/2024-11-27-21-40-10/best_model.zip'

obs_mode = "state"
control_mode = "pd_joint_vel"
reward_mode = "normalized_dense"

# One rendered environment that mirrors the physical robot's workspace.
env_kwargs = {
    "obs_mode": obs_mode,
    "control_mode": control_mode,
    "reward_mode": reward_mode,
    "render_mode": "human",
}
vec_env = make_vec_env(env_id=env_id, env_kwargs=env_kwargs, n_envs=1)

# Pre-trained PPO policy (its predict() call is disabled in the loop below).
model_1 = PPO.load(model1_path)

obs = vec_env.reset()

# Main sync loop: mirror the physical robot's pose (published by the CoCube
# thread) into the simulator and step the env with the current gripper
# command.  NOTE(review): the policy predict() call is commented out, so the
# action is always [0, 0, gripper_action]; the wheel-speed computation below
# is currently dead code (its set_wheel_speed_millisecs call is disabled).
# while not dones:
while True:
    obs = vec_env.reset()
    dones = False
    # vec_env.env_method("set_evaluate_func","pick")
    # goal_pos = [0.5, 0.3, 0.3]
    # vec_env.env_method("task_pick_init_goal",goal_pos)
    while not dones:
        
        # action, _states = model_1.predict(obs)
        # Compute left/right track speeds from the (forward, rotate) command.
        action = np.array([[0.0, 0.0, gripper_action]])

        forward_speed = action[0][0]*50
        rotate_speed = action[0][1]*50

        left_speed = forward_speed - rotate_speed
        right_speed = forward_speed + rotate_speed

        # Clamp the track speeds to the robot's allowed range [-50, 50].
        left_speed = max(-50, min(50, left_speed))
        right_speed = max(-50, min(50, right_speed))
        # agent.set_wheel_speed_millisecs(left_speed, right_speed, 100)


        # Teleport the simulated robot to the real robot's latest pose.
        vec_env.env_method("set_robot_pos",cocube_pos, cocube_rot)
        
        
        obs, rewards, dones, info = vec_env.step(action)
        # print("task_1_rewards:", rewards)
        # if(rewards[0] > 0.99):
            # with open("action_for_train/task_pick_success_action.txt", "a+") as f:
            #     f.writelines(str(vec_env.env_method("get_robot_pose"))+'\n')
            # with open("action_for_train/task_pick_success_pos.txt", "a+") as f:
            #     f.writelines(str(vec_env.env_method("get_cube_pose")) +'\n')
        vec_env.render("human")

        

        time.sleep(0.02)

        # NOTE(review): forcing dones back to False makes the inner loop run
        # forever — presumably intentional for continuous mirroring; confirm.
        dones = False

