﻿import numpy as np
import gymnasium as gym
import random
from collections import defaultdict

# Hyperparameter configuration
ALPHA = 0.1  # learning rate (TD-update step size)
GAMMA = 0.99  # discount factor for future rewards
EPSILON = 1.0  # initial exploration probability (epsilon-greedy)
EPSILON_MIN = 0.01  # floor for the exploration probability
EPSILON_DECAY = 0.995  # multiplicative epsilon decay applied once per episode
EPISODES = 500  # total number of training episodes
MAX_STEPS = 200  # maximum steps per episode

# Q table: maps a discretized state tuple to an array of per-action values.
# LunarLander has 4 discrete actions (noop, fire main, fire left, fire right);
# the defaultdict lazily creates a zero row for any unseen state.
Q = defaultdict(lambda: np.zeros(4))

# Epsilon-greedy action selection (used by both SARSA and Q-Learning).
def get_action(state, epsilon):
    """Pick an action for `state` with an epsilon-greedy policy.

    With probability `epsilon`, return a uniformly random action (explore);
    otherwise return the greedy action under the current Q table (exploit).
    The action count is derived from the Q-table row instead of a second
    hard-coded `4`, so the action-space size is defined in exactly one place.
    """
    action_values = Q[state]
    if random.uniform(0, 1) < epsilon:
        return np.random.choice(len(action_values))  # explore
    return int(np.argmax(action_values))  # exploit (ties -> lowest index)

def q_learning_update(state, action, reward, next_state, done):
    """Off-policy TD(0) update toward the greedy next-state value (Q-Learning)."""
    # Always index the defaultdict so an unseen next state gets its zero row,
    # matching the side effect of looking up the greedy next action.
    next_values = Q[next_state]
    current = Q[state][action]
    # Terminal transitions contribute no bootstrapped future value.
    target = reward if done else reward + GAMMA * np.max(next_values)
    # Move the estimate a fraction ALPHA of the way toward the TD target.
    Q[state][action] = current + ALPHA * (target - current)

def sarsa_update(state, action, reward, next_state, next_action, done):
    """On-policy TD(0) update using the action actually chosen next (SARSA)."""
    # Index unconditionally so unseen states get their zero row, as before.
    next_value = Q[next_state][next_action]
    current = Q[state][action]
    bootstrap = 0.0 if done else GAMMA * next_value  # no future value at terminal
    Q[state][action] = current + ALPHA * (reward + bootstrap - current)

# Main training loop
def train(env, use_sarsa=False):
    """Train the agent on `env` for EPISODES episodes.

    Args:
        env: a Gymnasium environment with a 4-action discrete action space.
        use_sarsa: True -> on-policy SARSA updates; False -> Q-Learning.

    Returns:
        List of total reward per episode.
    """
    global EPSILON  # epsilon is decayed across episodes

    rewards = []  # total reward recorded for each episode

    for episode in range(EPISODES):
        state, _ = env.reset()
        # Coarse discretization: round the continuous observation to 1 decimal
        # so it can be used as a hashable Q-table key.
        state = tuple(np.round(state, 1))
        action = get_action(state, EPSILON)
        total_reward = 0

        for step in range(MAX_STEPS):
            next_state, reward, done, truncated, _ = env.step(action)
            next_state = tuple(np.round(next_state, 1))
            total_reward += reward

            if use_sarsa:
                # SARSA: choose the next action first, then update with it.
                next_action = get_action(next_state, EPSILON)
                sarsa_update(state, action, reward, next_state, next_action, done)
            else:
                # Q-Learning: update from the greedy next-state value.
                q_learning_update(state, action, reward, next_state, done)
                # BUG FIX: the original never re-selected an action in this
                # branch, so the agent repeated its initial action for the
                # entire episode. Re-select epsilon-greedily each step.
                next_action = get_action(next_state, EPSILON)

            state = next_state
            action = next_action

            if done or truncated:  # episode over (crash/landing or time limit)
                break

        rewards.append(total_reward)

        # Decay exploration once per episode, never below the floor.
        EPSILON = max(EPSILON_MIN, EPSILON * EPSILON_DECAY)

        print(f"Episode {episode + 1}/{EPISODES}, Total Reward: {total_reward}, Epsilon: {EPSILON:.2f}")

    return rewards

# Entry point: build the environment, train, then roll out greedy test episodes.
if __name__ == "__main__":
    # NOTE(review): render_mode='human' renders every training step too, which
    # is very slow — confirm this is intentional before long runs.
    env = gym.make("LunarLander-v3", render_mode='human')

    # Flip to True to train with SARSA instead of Q-Learning.
    use_sarsa = False

    print(f"Training with {'SARSA' if use_sarsa else 'Q-Learning'}...")
    rewards = train(env, use_sarsa=use_sarsa)

    print("Testing trained agent...")
    EPSILON = 0.0  # greedy-only evaluation: exploration disabled
    for _ in range(5):  # five evaluation episodes
        obs, _ = env.reset()
        state = tuple(np.round(obs, 1))
        total_reward = 0

        for _ in range(MAX_STEPS):
            # Act greedily from the learned Q table.
            chosen = get_action(state, EPSILON)
            obs, reward, done, truncated, _ = env.step(chosen)
            total_reward += reward
            state = tuple(np.round(obs, 1))
            env.render()

            if done or truncated:
                break

        print(f"Total Reward: {total_reward}")

    env.close()
