import numpy as np
import matplotlib.pyplot as plt

# Environment parameters
goal_position = np.array([10, 10])  # target position the agent must reach
state_space = np.array([20, 20])  # grid dimensions (x, y)
velocity_space = np.array([-1, 1])  # [min, max] velocity per axis
action_space = np.array([[0, 1], [0, -1], [1, 0], [-1, 0]])  # velocity increments

# Q-table: one entry per (x, y, vx-index, vy-index, action).
# Each velocity axis takes the 3 integer values -1, 0, 1, so the velocity
# dimensions must have size (max - min + 1) = 3.  The previous expression
# len(range(*velocity_space)) evaluated to len(range(-1, 1)) == 2, which
# raised an IndexError as soon as a velocity component reached +1
# (mapped to index 2).
n_velocities = velocity_space[1] - velocity_space[0] + 1  # = 3
q_table = np.zeros((*state_space, n_velocities, n_velocities, len(action_space)))

# Learning parameters
learning_rate = 0.1
discount_factor = 0.9
epsilon = 0.1  # exploration rate for epsilon-greedy action selection
max_episodes = 500

# Records used for plotting
episode_rewards = []  # cumulative reward of each episode
state_trajectory = []  # state trajectory of each episode


# Reset the drone to its starting configuration.
def reset():
    """Return the initial (position, velocity) pair, both at the origin."""
    position = np.zeros(2, dtype=int)
    velocity = np.zeros(2, dtype=int)
    return position, velocity


# Epsilon-greedy action selection.
def choose_action(state, velocity):
    """Pick an action index for the current (state, velocity) cell.

    With probability `epsilon` a uniformly random action is returned;
    otherwise the action with the highest Q-value in this cell.
    """
    if np.random.rand() < epsilon:
        return np.random.randint(len(action_space))
    # Shift velocities from the range [-1, 1] to table indices [0, 2].
    vx, vy = (velocity + 1).astype(int)
    return np.argmax(q_table[state[0], state[1], vx, vy, :])


# Tabular Q-learning (TD(0)) update.
def update_q_table(state, velocity, action, reward, next_state, next_velocity):
    """Apply one Q-learning update for the observed transition."""
    # Shift velocities from the range [-1, 1] to table indices [0, 2].
    vel_idx = (velocity + 1).astype(int)
    next_vel_idx = (next_velocity + 1).astype(int)

    current_cell = (state[0], state[1], vel_idx[0], vel_idx[1])
    next_cell = (next_state[0], next_state[1], next_vel_idx[0], next_vel_idx[1])

    # Bootstrapped target: reward plus discounted value of the best next action.
    best_next_value = np.max(q_table[next_cell])
    td_target = reward + discount_factor * best_next_value
    td_error = td_target - q_table[current_cell + (action,)]
    q_table[current_cell + (action,)] += learning_rate * td_error


# Reward function.
def get_reward(state, goal=None):
    """Return the reward for standing at `state`.

    +100 on reaching the goal, otherwise the negative Manhattan distance
    to the goal (a dense shaping signal that grows less negative as the
    agent approaches the target).

    Parameters
    ----------
    state : np.ndarray
        Current (x, y) position.
    goal : np.ndarray, optional
        Target position. Defaults to the module-level `goal_position`,
        keeping the original one-argument call signature intact.
    """
    if goal is None:
        goal = goal_position
    if np.array_equal(state, goal):
        return 100
    return -int(np.sum(np.abs(goal - state)))


# Main training loop.
for episode in range(max_episodes):
    state, velocity = reset()
    episode_reward = 0
    trajectory = []
    done = False

    while not done:
        trajectory.append(state.copy())  # record the visited state

        # Choose an action, integrate velocity, then position.
        action = choose_action(state, velocity)
        next_velocity = np.clip(velocity + action_space[action],
                                velocity_space[0], velocity_space[1])
        next_state = np.clip(state + next_velocity, 0, state_space - 1)

        reward = get_reward(next_state)
        update_q_table(state, velocity, action, reward, next_state, next_velocity)

        state, velocity = next_state, next_velocity
        episode_reward += reward
        done = np.array_equal(state, goal_position)

        # Debug output
        print(f"Episode {episode + 1}, State: {state}, Velocity: {velocity}, Reward: {reward}")

    episode_rewards.append(episode_reward)
    state_trajectory.append(trajectory)

# Plot the learning curve and the last episode's trajectory side by side.
fig, (ax_reward, ax_traj) = plt.subplots(1, 2, figsize=(12, 5))

# Learning curve: cumulative reward per episode.
ax_reward.plot(episode_rewards)
ax_reward.set_title('Learning Curve')
ax_reward.set_xlabel('Episodes')
ax_reward.set_ylabel('Cumulative Reward')

# State trajectory of the final episode.
last_path = np.array(state_trajectory[-1])
ax_traj.plot(last_path[:, 0], last_path[:, 1], marker='o')
ax_traj.set_title('State Trajectory (Last Episode)')
ax_traj.set_xlabel('X Position')
ax_traj.set_ylabel('Y Position')
ax_traj.grid()

plt.show()
