import ADRC_reward
import ROSinfo
import rospy
import math
import threading
from autopilot import navigate_to_target
from enu_xyz import GNC_COORDINATE
from queue import Queue
from collections import deque
import numpy as np
import torch
import time

import random
from ADRC_TD3 import TD3
from ADRC_TD3 import OUNoise
from gazebo_msgs.srv import SetModelState
from gazebo_msgs.msg import ModelState
import queue
class LADRC_NUM:
    """Parameter/state container for a second-order LADRC controller.

    Holds the tracking-differentiator states (v1..v3), the extended state
    observer states (z1..z3), the tuning gains, and the latest control
    output `u` (written by LADRC.LADRC_LF).

    Args:
        r:  tracking-differentiator speed factor.
        w0: observer bandwidth.
        wc: controller bandwidth.
        b0: control-gain estimate (divisor in the control law; must be
            non-zero before LADRC_LF runs).
        h:  integration step size in seconds; defaults to the previously
            hard-coded 0.001 so existing callers are unaffected.
    """

    def __init__(self, r=2, w0=0, wc=0, b0=0, h=0.001):
        # Tracking-differentiator states. v3 is kept for interface parity;
        # the visible LADRC code only updates v1/v2.
        self.v1 = 0
        self.v2 = 0
        self.v3 = 0
        self.r = r
        self.h = h
        # Extended state observer states; z3 estimates the total disturbance.
        self.z1 = 0
        self.z2 = 0
        self.z3 = 0
        self.w0 = w0
        self.wc = wc
        self.b0 = b0
        # Latest control output.
        self.u = 0

class LADRC:
    """Linear ADRC control loop operating on a LADRC_NUM state object.

    One call to LADRC_Loop advances the tracking differentiator, the
    extended state observer, and the linear feedback law by a single
    step of size ``ladrc_num.h``, leaving the control output in
    ``ladrc_num.u``.
    """

    def __init__(self, ladrc_num):
        self.ladrc_num = ladrc_num

    def LADRC_TD(self, expect):
        """Second-order tracking differentiator: v1 tracks `expect`, v2 its rate."""
        s = self.ladrc_num
        accel = -1 * s.r * s.r * (s.v1 - expect) - 2 * s.r * s.v2
        s.v1 += s.v2 * s.h
        s.v2 += accel * s.h

    def LADRC_ESO(self, feedback):
        """Linear extended state observer; z3 tracks the total disturbance."""
        s = self.ladrc_num
        # Observer gains from the standard bandwidth (w0) parameterization.
        beta1 = 3 * s.w0
        beta2 = 3 * s.w0 * s.w0
        beta3 = s.w0 * s.w0 * s.w0
        err = s.z1 - feedback
        s.z1 += (s.z2 - beta1 * err) * s.h
        s.z2 += (s.z3 - beta2 * err + s.b0 * s.u) * s.h
        s.z3 += -1 * beta3 * err * s.h

    def LADRC_LF(self):
        """PD-style linear feedback with disturbance compensation via z3."""
        s = self.ladrc_num
        kp = s.wc * s.wc
        kd = 2 * s.wc
        u0 = kp * (s.v1 - s.z1) + kd * (-1 * s.z2)
        s.u = (u0 - s.z3) / s.b0

    def LADRC_Loop(self, expect_value, measure):
        """Run one full TD -> ESO -> LF update cycle."""
        self.LADRC_TD(expect_value)
        self.LADRC_ESO(measure)
        self.LADRC_LF()


# Globally shared queues that carry sensor data from the ROS callbacks
# (defined in the __main__ section) to the training thread.
boat_pos_queue = Queue()    # (x, y) position in the local ENU frame
boat_yaw_queue = Queue()    # yaw (heading) angle
boat_vel_queue = Queue()    # (forward_v, lateral_v) body velocities
boat_speed_queue = Queue()  # scalar ground speed
# NOTE(review): despite the name, this is pi/3, not pi; it is unused in
# the visible code — confirm intent before relying on it.
M_PI = math.pi/3

def goto_point(given_point):
    """Instantly relocate the 'wamv' model to (x, y) = given_point in Gazebo.

    Blocks until the /gazebo/set_model_state service is available, then
    places the model at the requested position (z = 0) with an identity
    orientation in the world frame. Service failures are printed rather
    than raised.
    """
    rospy.wait_for_service('/gazebo/set_model_state')
    try:
        teleport = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)

        # Assemble the target state: requested x/y, flat on the water,
        # default heading (identity quaternion), expressed in world frame.
        target = ModelState()
        target.model_name = 'wamv'
        target.reference_frame = 'world'
        target.pose.position.x, target.pose.position.y = given_point[0], given_point[1]
        target.pose.position.z = 0.0
        target.pose.orientation.x = 0.0
        target.pose.orientation.y = 0.0
        target.pose.orientation.z = 0.0
        target.pose.orientation.w = 1.0

        teleport(target)
        print("无人船已瞬移到目标点：", given_point)
    except rospy.ServiceException as e:
        print("调用 /gazebo/set_model_state 服务失败: %s" % e)


# Number of discrete sea-state classes. Valid levels are 0 .. NUM_SEA_STATES-1
# (i.e. 0-3 with the current value; the original comment wrongly claimed
# five levels 0-4).
NUM_SEA_STATES = 4

def get_sea_state_vector(level):
    """Return the one-hot encoding of a sea-state level.

    Args:
        level (int): sea-state class index, 0 <= level < NUM_SEA_STATES.

    Returns:
        np.ndarray: length-NUM_SEA_STATES one-hot vector with a 1.0 at
        index `level`.

    Raises:
        ValueError: if `level` is outside the valid range.
    """
    if level < 0 or level >= NUM_SEA_STATES:
        raise ValueError("海况等级超出范围")
    vec = np.zeros(NUM_SEA_STATES)
    vec[level] = 1.0
    return vec


def map_action_to_parameters(action):
    """Linearly map a normalized action vector to ADRC parameter ranges.

    (Docs corrected: the original docstring claimed a length-4 action
    returning (r, w0, wc, b0), and its comment claimed w0 in [500, 1000];
    the code actually uses a length-2 action and the ranges below.)

    Args:
        action (np.array or list): length 2, each component in [-1, 1];
            action[0] selects w0, action[1] selects b0.

    Returns:
        tuple: (w0, b0) with w0 in [100, 400] and b0 in [500, 2500].
    """
    # Affine map from [-1, 1] onto each parameter's tuning range.
    w0 = (action[0] + 1) / 2 * (400 - 100) + 100
    b0 = (action[1] + 1) / 2 * (2500 - 500) + 500

    return w0, b0


# =================== Environment interface class ======================
class USVEnvironment(object):
    """Episode-level environment wrapper around the Gazebo USV simulation.

    The observation is a fixed one-hot sea-state vector; an action is one
    set of ADRC parameters held constant for a whole episode.  Depends on
    the module-level sensor queues and on the global `rosvrx_control`
    object created in the __main__ section.
    """

    def __init__(self, points):
        """
        points: waypoint list containing the start point and the target point.

        NOTE(review): the original docstring also described an
        `episode_duration` argument, but the episode length is hard-coded
        inside step(); confirm before exposing it as a parameter.
        """
        self.points = points
        # Copies decouple the reference line used for error metrics from
        # any later in-place mutation of the `points` entries.
        self.given_point = points[0].copy()
        self.new_point = points[1].copy()

    def reset(self):
        # Teleport the boat back to the start, flush stale sensor data,
        # and return the (constant) initial state vector.

        origin_point = [-350, 232]  # start position in local ENU coordinates
        goto_point(origin_point)
        time.sleep(0.5)
        rosvrx_control.manual_act(0, 0, 0)  # zero thrust and steering

        # Drain the queues so samples from the previous episode cannot
        # leak into the new one.
        while not boat_pos_queue.empty():
            boat_pos_queue.get()
        while not boat_yaw_queue.empty():
            boat_yaw_queue.get()
        # Teleport the boat to the start position again.
        # NOTE(review): this repeats the teleport above — presumably to
        # guarantee the pose after the queue flush; confirm one call suffices.
        origin_point = [-350, 232]  # start position
        goto_point(origin_point)
        # time.sleep(0.5)
        # rosvrx_control.manual_act(0, 0, 0)
        # # 获取更新后的状态数据
        # boat_x, boat_y = boat_pos_queue.get()
        # yaw = boat_yaw_queue.get()
        # forward_v, lateral_v = boat_vel_queue.get()
        # # 计算当前误差
        # t_error, ang_error = ADRC_reward.calculate_metrics(
        #     yaw, forward_v, lateral_v,
        #     self.given_point[0], self.given_point[1],
        #     self.new_point[0], self.new_point[1],
        #     boat_x, boat_y
        # )
        # print(t_error)


        # Fixed observation: one-hot encoding of sea-state level 3.
        state = get_sea_state_vector(3)
        return state

    def step(self, action_params):
        """
        Run one whole episode with a fixed pair of ADRC parameters
        (action_params = (w0, b0)): up to ~400 control steps, accumulating
        the per-step reward, and return (cumulative_reward, done).
        The state is constant, so no next-state is returned.
        """
        cumulative_reward = 0.0
        done = False
        turn_values = deque(maxlen=20)  # sliding window of recent turn outputs
        step = 0
        # Build one LADRC controller for the episode; wc is tied to w0 / 4.
        w0, b0 = action_params
        ADRC_param = LADRC_NUM(2, w0, w0 / 4, b0)
        ADRC_ctl = LADRC(ADRC_param)
        # Control loop: runs until the step limit or an early `done`.
        while 1:
            # # 从队列中获取当前船的位置和航向数据
            # boat_x, boat_y = boat_pos_queue.get()
            # yaw = boat_yaw_queue.get()
            try:
                boat_x, boat_y = boat_pos_queue.get_nowait()
                yaw = boat_yaw_queue.get_nowait()
            except queue.Empty:
                continue  # sensor data not available yet; poll again

            if step > 400:
                break
            # Execute the ADRC control law for this step.
            else:
                dis, exceped_ang = ADRC_reward.calculate_metrics(yaw, 0, 0, self.given_point[0], self.given_point[1],
                    self.new_point[0], self.new_point[1], boat_x, boat_y)
                ADRC_ctl.LADRC_Loop(0, exceped_ang)
                turn = ADRC_param.u
                # Constant forward thrust; `turn` steers the boat.
                rosvrx_control.manual_act(0.8, 0.8, turn)
                turn_values.append(turn)
                # print(dis,exceped_ang)
                # Per-step reward from the distance/heading errors.
                reward, done = ADRC_reward.reward_function(dis, exceped_ang, turn_values, False)
                cumulative_reward += reward
                if done:
                    cumulative_reward -= 100  # terminal penalty
                    break
            step += 1  # only advances on iterations where sensor data arrived
        # Episode over: return the accumulated reward and the done flag.
        return cumulative_reward, done


# =================== Training main loop ======================
def train(points):
    """Main TD3 training loop over ADRC parameters.

    points: [start, target] waypoints in local ENU coordinates.

    Each episode samples one (noise-perturbed) action from the policy,
    maps it to ADRC parameters (w0, b0), runs a full episode with those
    parameters fixed, and stores the resulting transition in the replay
    buffer.  The observation is constant, so next_state == state.
    """
    # Environment built around the [start, target] waypoints.
    env = USVEnvironment(points)

    # Initialize the RL agent.
    state_dim = 4  # length of the one-hot sea-state vector
    action_dim = 2  # ADRC parameters: w0, b0
    hidden_dim = 128
    agent = TD3(action_dim, state_dim, hidden_dim)
    model = agent.policy_net
    ou_noise = OUNoise(ADRC_reward.USV_ActionSpace())

    max_episodes = 2000
    batch_size = 128
    frame_idx = 0
    rewards = []

    for episode in range(max_episodes):
        state = env.reset()  # reset the environment, get the initial state
        ou_noise.reset()

        # One action (parameter set) per episode, held fixed throughout it.
        action = agent.policy_net.get_action(state)
        noise_action, ou_state = ou_noise.get_action(action, frame_idx)

        noise_action = noise_action.reshape(-1)
        print(action, ou_state,noise_action)
        # Map the noisy action into the ADRC parameter ranges.
        w0, b0 = map_action_to_parameters(noise_action)
        # print('当前参数:','r=',r, 'w0=',w0,'wc=',wc,'b0=',b0)
        action_params = (w0, b0)

        # Run the whole episode with this parameter set.
        cumulative_reward, done = env.step(action_params)

        next_state = state  # state is constant, so next state equals it
        # Store the whole-episode experience in the replay buffer.
        agent.replay_buffer.push(state, noise_action, cumulative_reward, next_state, done)

        if len(agent.replay_buffer) > batch_size:
            agent.ddpg_update()

        rewards.append(cumulative_reward)
        frame_idx += 1
        print("Episode: {}, Cumulative Reward: {}, Action Params: {}".format(episode, cumulative_reward, action_params))
        # Periodic checkpoint.
        # NOTE(review): hard-coded absolute save path — consider making it configurable.
        if episode % 100 == 0:
            torch.save(model.state_dict(), '/home/ltp/train model/5/net-weight.pt')
    ADRC_reward.plot(frame_idx, rewards)


if __name__ == "__main__":
    gnc_coordinate = GNC_COORDINATE()

    # Reference corner waypoints (unused, kept for future routes):
    # hangdian1_lon = 150.6749317
    # hangdian1_lat = -33.7222442
    # hangdian2_lon = 150.6759145
    # hangdian2_lat = -33.7226047
    # hangdian3_lon = 150.6763608
    # hangdian3_lat = -33.7219765
    # hangdian4_lon = 150.6754145
    # hangdian4_lat = -33.7216427

    # Training path waypoints as [longitude, latitude].
    points = [
        [150.6755737, -33.7221389],  # first waypoint
        [150.6825302, -33.7221389],  # second waypoint
    ]
    # The first waypoint serves as the local ENU origin.
    origin_lon = points[0][0]
    origin_lat = points[0][1]
    # Convert every waypoint from lon/lat to local ENU (E, N) coordinates.
    # BUGFIX: capture lon/lat before mutating points[i] — the original code
    # overwrote points[i][0] with the easting first and then passed that
    # easting to Lat_trans_to_N as if it were a longitude (the lon-first
    # argument order is confirmed by update_func_gps below).
    for i in range(len(points)):
        lon, lat = points[i][0], points[i][1]
        points[i][0] = gnc_coordinate.Lon_trans_to_E(lon, lat, origin_lon, origin_lat)
        points[i][1] = gnc_coordinate.Lat_trans_to_N(lon, lat, origin_lon, origin_lat)


    # Dump the converted waypoints for inspection.
    print("points = [")
    for sublist in points:
        print("    [", end="")
        for item in sublist:
            print(item, end=", ")
        print("],")
    print("]")


    def update_func_gps(handle, lat, lon, alt):
        # Convert the GPS fix to local ENU and hand it to the training thread.
        boat_x = gnc_coordinate.Lon_trans_to_E(lon, lat, origin_lon, origin_lat)
        boat_y = gnc_coordinate.Lat_trans_to_N(lon, lat, origin_lon, origin_lat)
        boat_pos_queue.put((boat_x, boat_y))


    def update_func_ea(handle, yaw, pitch, roll):
        # Only yaw is consumed by the controller.
        boat_yaw_queue.put(yaw)

    def update_func_ground_speed(handle, speed , forward_v , lateral_v):
        boat_vel_queue.put((forward_v,lateral_v))
        boat_speed_queue.put(speed)

    def update_func_gyro(handle, gyroX, gyroY, gyroZ):
        pass

    def update_func_acc(handle, accX, accY, accZ):
        pass

    rospy.init_node('my_ROSVRX_node')

    update_handle = None

    ros_vrx_info = ROSinfo.ROSVRXInfo(update_func_gps, update_func_ea, update_func_ground_speed,
                              update_func_gyro, update_func_acc, update_handle)

    propeller_mid = 0
    servo_mid = 0
    rosvrx_control = ROSinfo.propulsion_VRX(propeller_mid, servo_mid)


    # Run training in a background thread; ROS callbacks run in this one.
    train_thread = threading.Thread(target=train, args=(points,))
    train_thread.start()

    try:
        ros_vrx_info.ROSVRX_info_create()

    except rospy.ROSInterruptException:
        pass
