import ADRC_ESOreward
import ROSinfo
import rospy
import math
import threading
from autopilot import navigate_to_target
from enu_xyz import GNC_COORDINATE
from queue import Queue
from collections import deque
import numpy as np
import torch
import time
from tf.transformations import quaternion_from_euler
import random
from ADRC_ESO_TD3 import TD3
from ADRC_ESO_TD3 import OUNoise
from gazebo_msgs.srv import SetModelState
from gazebo_msgs.msg import ModelState
import queue
class LADRC_NUM:
    """Parameter and state container shared by the LADRC controller.

    Holds the tracking-differentiator states (v1..v3), the extended state
    observer states (z1..z3), the tuning gains (r, w0, wc, b0), the fixed
    integration step h, and the most recent control output u.
    """

    def __init__(self, r=2.0, w0=0.0, wc=0.0, b0=0.0):
        # Tracking differentiator states (v1 tracks the setpoint, v2 its rate).
        self.v1 = self.v2 = self.v3 = 0
        # Extended state observer states; z3 estimates the total disturbance.
        self.z1 = self.z2 = self.z3 = 0
        # TD speed factor and fixed integration step (seconds).
        self.r = r
        self.h = 0.001
        # Observer bandwidth, controller bandwidth and input gain.
        self.w0 = w0
        self.wc = wc
        self.b0 = b0
        # Latest control output (a [-1, 1] clamp was prototyped and disabled).
        self.u = 0

class LADRC:
    """Linear Active Disturbance Rejection Controller.

    Operates on a LADRC_NUM-style state holder: a tracking differentiator
    shapes the setpoint, a linear extended state observer estimates the
    plant state plus total disturbance, and a PD-style feedback law with
    disturbance compensation produces the control output u.
    """

    def __init__(self, ladrc_num):
        # Shared parameter/state container (v*, z*, gains, output u).
        self.ladrc_num = ladrc_num

    def LADRC_TD(self, expect):
        """One step of the second-order tracking differentiator."""
        p = self.ladrc_num
        accel = -p.r * p.r * (p.v1 - expect) - 2 * p.r * p.v2
        p.v1 = p.v1 + p.v2 * p.h
        p.v2 = p.v2 + accel * p.h

    def LADRC_ESO(self, feedback):
        """One step of the linear ESO; gains come from bandwidth w0."""
        p = self.ladrc_num
        beta1 = 3 * p.w0
        beta2 = 3 * p.w0 * p.w0
        beta3 = p.w0 * p.w0 * p.w0
        err = p.z1 - feedback
        p.z1 = p.z1 + (p.z2 - beta1 * err) * p.h
        p.z2 = p.z2 + (p.z3 - beta2 * err + p.b0 * p.u) * p.h
        p.z3 = p.z3 + -1 * beta3 * err * p.h

    def LADRC_LF(self):
        """PD feedback on the TD/ESO states, then disturbance compensation."""
        p = self.ladrc_num
        kp = p.wc * p.wc
        kd = 2 * p.wc
        u0 = kp * (p.v1 - p.z1) + kd * (-1 * p.z2)
        p.u = (u0 - p.z3) / p.b0

    def LADRC_Loop(self, expect_value, measure):
        """Run one full TD -> ESO -> LF control cycle."""
        self.LADRC_TD(expect_value)
        self.LADRC_ESO(measure)
        self.LADRC_LF()


# Global queues that hand sensor data from the ROS callbacks (main thread)
# over to the training thread.
boat_pos_queue = Queue()    # (boat_x, boat_y) positions in local ENU coordinates
boat_yaw_queue = Queue()    # yaw readings
boat_vel_queue = Queue()    # (forward_v, lateral_v) body-frame velocities
boat_speed_queue = Queue()  # ground speed
# NOTE(review): despite the name, M_PI is pi/3 (60 degrees), not pi, and it is
# not referenced anywhere in this file -- confirm intent before relying on it.
M_PI = math.pi/3


def goto_point(given_point):
    """Teleport the 'wamv' model to given_point via Gazebo's set_model_state.

    given_point: [x, y] position in the Gazebo 'world' frame.  A random yaw
    is applied: 0..45 degrees when y < 232, otherwise -45..0 degrees, so the
    boat starts angled toward the track from whichever side it spawns on.
    (The original comments claimed 0..90 / -90..0; the code samples +/-45.)
    """
    # Block until the Gazebo teleport service is up.
    rospy.wait_for_service('/gazebo/set_model_state')
    try:
        set_state = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)

        # Random heading depending on which side of y=232 we spawn.
        if given_point[1] < 232:
            yaw_deg = random.uniform(0, 45)   # 0 to 45 degrees
        else:
            yaw_deg = random.uniform(-45, 0)  # -45 to 0 degrees

        # math.radians is exact; the old hand-written 3.14159265/180 truncated pi.
        yaw_rad = math.radians(yaw_deg)
        quat = quaternion_from_euler(0, 0, yaw_rad)

        # Build the target model state.
        state_msg = ModelState()
        state_msg.model_name = 'wamv'
        state_msg.pose.position.x = given_point[0]
        state_msg.pose.position.y = given_point[1]
        state_msg.pose.position.z = 0.0  # place the hull at water level
        state_msg.pose.orientation.x = quat[0]
        state_msg.pose.orientation.y = quat[1]
        state_msg.pose.orientation.z = quat[2]
        state_msg.pose.orientation.w = quat[3]
        state_msg.reference_frame = 'world'

        # Call the service (instant teleport).
        resp = set_state(state_msg)
        print(f"无人船已瞬移到目标点 {given_point}，并随机旋转 {yaw_deg:.2f} 度")
    except rospy.ServiceException as e:
        print("调用 /gazebo/set_model_state 服务失败: %s" % e)


# Number of discrete sea-state levels; valid levels are 0 .. NUM_SEA_STATES-1
# (i.e. 0..3).  The original comment claimed five levels (0..4), which
# contradicted this constant.
NUM_SEA_STATES = 4

def get_sea_state_vector(level):
    """
    Build the one-hot encoding for a sea-state level.

    Args:
        level (int): sea-state level in the range 0 .. NUM_SEA_STATES-1

    Returns:
        np.ndarray: one-hot vector of length NUM_SEA_STATES

    Raises:
        ValueError: if level is outside the valid range
    """
    if not 0 <= level < NUM_SEA_STATES:
        raise ValueError("海况等级超出范围")
    vec = np.zeros(NUM_SEA_STATES)
    vec[level] = 1.0
    return vec


def map_action_to_parameters(action):
    """
    Map a normalized action vector onto the ADRC parameter ranges.

    Args:
        action (np.array or list): length 2, each component in [-1, 1]

    Returns:
        tuple: (w0, b0) with w0 in [50, 450] and b0 in [750, 3000]

    Note: the previous comments claimed w0 in [100, 300] and b0 in
    [750, 2250]; the code has always produced [50, 450] and [750, 3000].
    """

    # w0 mapped linearly from [-1, 1] to [50, 450]
    w0 = (action[0] + 1) / 2 * (450 - 50) + 50
    # b0 mapped linearly from [-1, 1] to [750, 3000]
    b0 = (action[1] + 1) / 2 * (3000 - 750) + 750

    return w0, b0


# =================== 环境接口类 ======================
class USVEnvironment(object):
    """RL environment: the agent picks ADRC gains (w0, b0) each step while
    the LADRC controller steers the USV along the segment given_point ->
    new_point.

    State vector: [distance_error, angle_error, angle_error_derivative, z3].
    Depends on the module-level sensor queues and on the global
    `rosvrx_control` created in the __main__ block.
    """

    def __init__(self, points, max_steps=400):
        """
        points: waypoint list [start, target] in local ENU coordinates
        max_steps: nominal episode length (not enforced here; the training
                   loop decides when an episode ends)
        """
        self.points = points
        self.given_point = points[0].copy()
        self.new_point = points[1].copy()
        self.max_steps = max_steps

        # Placeholder controller; reset() rebuilds it with the real gains.
        self.ADRC_param = LADRC_NUM(r=2, w0=1.0, wc=0.25, b0=1.0)
        self.ADRC_ctl = LADRC(self.ADRC_param)

        self.step_count = 0
        # Previous angle error, used to compute the error change rate.
        self.last_angle_error = 0.0

    def reset(self):
        """
        Reset the environment and ADRC internal state at episode start.

        Returns the initial state vector
        [distance_error, angle_error, angle_error_derivative (0), z3].
        """
        # Teleport the boat to the start position, offset laterally by a
        # random 5..12 m to either side of the track.
        origin_point = [-350, 232]
        origin_point[1] += random.choice([
                                    random.uniform(5, 12),
                                    random.uniform(-12, -5)
                                ])

        goto_point(origin_point)
        time.sleep(1)
        # Stop thrusters (rosvrx_control is a module global set in __main__).
        rosvrx_control.manual_act(0, 0, 0)

        # Drain stale sensor samples so the episode starts with fresh data.
        while not boat_pos_queue.empty():
            boat_pos_queue.get()
        while not boat_yaw_queue.empty():
            boat_yaw_queue.get()

        # Re-create the controller with the baseline gains and zeroed states.
        self.ADRC_param = LADRC_NUM(r=2, w0=200, wc=50, b0=1500)
        self.ADRC_ctl = LADRC(self.ADRC_param)

        self.step_count = 0

        # Block until the first position/yaw samples arrive.
        boat_x, boat_y, yaw = self._get_latest_boat_data()
        # calculate_metrics returns (distance_error, angle_error).
        dis, exceped_ang = ADRC_ESOreward.calculate_metrics(
            yaw, self.given_point[0], self.given_point[1],
            self.new_point[0], self.new_point[1],
            boat_x, boat_y
        )

        # No previous error yet, so the derivative term starts at zero.
        angle_error_deriv = 0.0

        # Remember the current angle error for the next derivative computation.
        self.last_angle_error = exceped_ang

        # State: [distance_error, angle_error, angle_error_derivative, z3].
        state = np.array([dis, exceped_ang, angle_error_deriv, self.ADRC_param.z3], dtype=np.float32)
        return state

    def step(self, action_params):
        """
        Advance the environment one step.

        action_params: (w0, b0).  The existing ADRC_param object is updated
        in place so the observer states (z1..z3) carry over between steps.

        Returns (next_state, reward, done, info); done is always False here,
        episode termination is handled by the training loop.
        """
        w0, b0 = action_params

        # Update only the gains; keep the controller's internal state.
        self.ADRC_param.w0 = w0
        self.ADRC_param.wc = w0 / 4  # controller bandwidth tied to observer bandwidth
        self.ADRC_param.b0 = b0

        # Latest boat pose (blocks until data is available).
        boat_x, boat_y, yaw = self._get_latest_boat_data()

        # Current distance and angle errors relative to the track.
        dis, exceped_ang = ADRC_ESOreward.calculate_metrics(
            yaw, self.given_point[0], self.given_point[1],
            self.new_point[0], self.new_point[1],
            boat_x, boat_y
        )

        # One ADRC cycle (setpoint 0 for the angle error), then apply thrust.
        self.ADRC_ctl.LADRC_Loop(0, exceped_ang)
        turn = self.ADRC_param.u
        rosvrx_control.manual_act(0.8, 0.8, turn)

        # Change rate of the absolute angle error (negative means improving).
        angle_error_deriv = abs(exceped_ang) - abs(self.last_angle_error)
        self.last_angle_error = exceped_ang

        # Reward from the error dynamics.
        reward = ADRC_ESOreward.reward_function(angle_error_deriv, exceped_ang,dis)

        # Next state handed back to the agent.
        next_state = np.array([dis, exceped_ang, angle_error_deriv, self.ADRC_param.z3], dtype=np.float32)

        return next_state, reward, False, {}

    def _get_latest_boat_data(self):
        """
        Block until the next (boat_x, boat_y, yaw) sample is available.
        """
        boat_x, boat_y = boat_pos_queue.get(block=True)  # blocking wait
        yaw = boat_yaw_queue.get(block=True)  # blocking wait
        return boat_x, boat_y, yaw


# =================== 训练主循环 ======================
def train(points):
    """TD3 training loop: tunes the ADRC gains (w0, b0) online while the
    USV tracks the path defined by `points`.

    Runs max_episodes episodes of max_steps steps each, appends one
    "episode,reward" line to data.txt per episode, and saves the policy
    network weights once after all episodes finish.
    """
    import os

    # Create data.txt with a CSV header if it does not exist yet.
    # NOTE(review): this header lists per-step columns, but the loop below
    # only appends "episode,episode_reward" rows -- confirm which format
    # downstream tooling actually expects.
    if not os.path.exists("data.txt"):
        with open("data.txt", "w") as f:
            f.write("Episode,Step,w0,b0,Noise_w0,Noise_b0,Reward,AngleError,AngleErrorDeriv\n")

    env = USVEnvironment(points, max_steps=400)

    # State: [distance_error, angle_error, angle_error_derivative, z3]
    # Action: (w0, b0)
    state_dim = 4
    action_dim = 2
    hidden_dim = 128
    agent = TD3(action_dim, state_dim, hidden_dim)
    model = agent.policy_net

    ou_noise = OUNoise(ADRC_ESOreward.USV_ActionSpace())  # exploration noise over the action space
    max_episodes = 700
    max_steps = 500  # steps per episode (env's max_steps=400 is not enforced here)
    batch_size = 256

    all_rewards = []
    frame_idx = 0  # global step counter used by the OU-noise schedule

    for episode in range(max_episodes):
        state = env.reset()  # teleport the boat and reset the controller
        ou_noise.reset()

        episode_reward = 0.0

        for step_i in range(max_steps):
            # 1. Select raw action, perturb with OU noise, map to (w0, b0).
            action_original = agent.policy_net.get_action(state)  # network output
            noise_action, ou_state = ou_noise.get_action(action_original, frame_idx)
            noise_action = noise_action.reshape(-1)
            w0, b0 = map_action_to_parameters(noise_action)  # map into the parameter ranges
            action = np.array([w0, b0], dtype=np.float32)

            # 2. Step the environment.
            next_state, reward, done, _ = env.step(action)

            # 3. Store the (normalized) action in the replay buffer.
            agent.replay_buffer.push(state, noise_action, reward, next_state, done)

            # 4. Update the networks once the buffer is warm.
            if len(agent.replay_buffer) > batch_size:
                agent.ddpg_update()

            # 5. Accumulate reward and advance.
            episode_reward += reward
            frame_idx += 1
            # Debug trace: step, dist error, angle error (deg), deriv, reward, gains.
            print(step_i, next_state[0], next_state[1] * 180/math.pi, next_state[2], reward, w0, b0)
            state = next_state

        # 6. Append this episode's total reward to data.txt.
        with open("data.txt", "a") as f:
            f.write(f"{episode},{episode_reward}\n")
        all_rewards.append(episode_reward)

        print(f"Episode: {episode}, Reward: {episode_reward:.3f}")

    # Save the policy weights once, after all episodes.
    torch.save(model.state_dict(), '/home/ltp/train model/5/net-weight1.pt')

    # Plot the learning curve.
    ADRC_ESOreward.plot(range(len(all_rewards)), all_rewards)


if __name__ == "__main__":
    gnc_coordinate = GNC_COORDINATE()

    # hangdian1_lon = 150.6749317
    # hangdian1_lat = -33.7222442
    # hangdian2_lon = 150.6759145
    # hangdian2_lat = -33.7226047
    # hangdian3_lon = 150.6763608
    # hangdian3_lat = -33.7219765
    # hangdian4_lon = 150.6754145
    # hangdian4_lat = -33.7216427

    # 训练路径航点
    points = [
        [150.6755737, -33.7221389],  # 第一个坐标点
        [150.6825302, -33.7221389],  # 第二个坐标点
    ]
    # 设置初始点经纬度
    origin_lon = points[0][0]
    origin_lat = points[0][1]
    #写一个for循环，将每个点的经纬度转换为坐标系
    for i in range(len(points)):
        points[i][0] = gnc_coordinate.Lon_trans_to_E(points[i][0], points[i][1], origin_lon, origin_lat)
        points[i][1] = gnc_coordinate.Lat_trans_to_N(points[i][0], points[i][1], origin_lon, origin_lat)


    print("points = [")
    for sublist in points:
        print("    [", end="")
        for item in sublist:
            print(item, end=", ")
        print("],")
    print("]")

    def update_func_gps(handle, lat, lon, alt):
        # 将ROS传回的经纬度转换为坐标系
        boat_x = gnc_coordinate.Lon_trans_to_E(lon, lat, origin_lon, origin_lat)
        boat_y = gnc_coordinate.Lat_trans_to_N(lon, lat, origin_lon, origin_lat)
        boat_pos_queue.put((boat_x, boat_y))

    def update_func_ea(handle, yaw, pitch, roll):
        boat_yaw_queue.put(yaw)

    def update_func_ground_speed(handle, speed , forward_v , lateral_v):
        boat_vel_queue.put((forward_v,lateral_v))
        boat_speed_queue.put(speed)

    def update_func_gyro(handle, gyroX, gyroY, gyroZ):
        pass

    def update_func_acc(handle, accX, accY, accZ):
        pass

    rospy.init_node('my_ROSVRX_node')

    update_handle = None

    ros_vrx_info = ROSinfo.ROSVRXInfo(update_func_gps, update_func_ea, update_func_ground_speed,
                              update_func_gyro, update_func_acc, update_handle)

    propeller_mid = 0
    servo_mid = 0
    rosvrx_control = ROSinfo.propulsion_VRX(propeller_mid, servo_mid)


    # 创建一个线程来发布命令
    train_thread = threading.Thread(target=train, args=(points,))
    train_thread.start()

    try:
        ros_vrx_info.ROSVRX_info_create()

    except rospy.ROSInterruptException:
        pass
