# Learn the LunarLander game using the Actor-Critic (PPO) approach

import gym
import torch
import loguru
import fire
import torch.nn as nn
from   torch.distributions import Categorical

logger = loguru.logger
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

class Memory(object):
    """Rollout buffer holding one batch of trajectory data for PPO.

    The five lists are parallel: index i describes the i-th environment
    step (state, action, log-prob at collection time, reward, done flag).
    """

    def __init__(self) -> None:
        self.actions  = []
        self.states   = []
        self.logprobs = []
        self.rewards  = []
        self.dones    = []

    def clear(self):
        """Empty every buffer in place (the list objects themselves survive)."""
        for buf in (self.actions, self.states, self.logprobs,
                    self.rewards, self.dones):
            del buf[:]

class ActorCritic(nn.Module):
    def __init__(self, state_dim, action_dim, hidden_dim):
        super(ActorCritic, self).__init__()

        #! Policy Network, 根据状态空间通过模型计算采取各个actions的概率分布
        self.actor_layer = nn.Sequential(
                nn.Linear(state_dim, hidden_dim),
                nn.Tanh(),
                nn.Linear(hidden_dim, hidden_dim),
                nn.Tanh(),
                nn.Linear(hidden_dim, action_dim),
                nn.Softmax(dim=-1)
            )

        #! Critic Network, 用于判断actor当前的action是否合理，会得到一个score分数
        self.value_layer = nn.Sequential(
                nn.Linear(state_dim, hidden_dim),
                nn.Tanh(),
                nn.Linear(hidden_dim, hidden_dim),
                nn.Tanh(),
                nn.Linear(hidden_dim, 1)
            )
 
    def forward(self):
        raise NotImplementedError

    def action(self, state, memory):
        """
        @brief 根据环境返回的状态使用actor模型预测action
        
        @param[in] state:  环境返回的状态
        @param[in] memory: 记忆, 这里是一个引用不用返回

        @return: action    返回actor网路选择的action
        """
        state        = torch.from_numpy(state).float().to(device)
        action_probs = self.actor_layer(state)
        dist         = Categorical(action_probs)       #! 创建一个概率分布
        action       = dist.sample()                   #! 根据概率随机选择一个动作

        memory.states.append(state)
        memory.actions.append(action)
        memory.logprobs.append(dist.log_prob(action))  #! 给定选项的对数概率

        return action.item()

    def evaluate(self, state, action):
        action_probs    = self.actor_layer(state)
        dist            = Categorical(action_probs)
        action_logprob  = dist.log_prob(action)

        dist_entropy    = dist.entropy()
        state_value     = self.value_layer(state)
        return action_logprob, torch.squeeze(state_value), dist_entropy


class PPO(object):
    """Proximal Policy Optimization trainer with separate old/new policies."""

    def __init__(self, state_dim, action_dim, hidden_dim, lr=0.001,
                betas=(0.9, 0.999), gamma=0.99, K_epochs=4, eps_clip=0.2):
        self.gamma      = gamma       # discount factor for returns
        self.K_epochs   = K_epochs    # optimization passes per collected batch
        self.eps_clip   = eps_clip    # PPO surrogate clipping range

        # Network that is actually trained (parameters get updated).
        self.policy     = ActorCritic(state_dim, action_dim, hidden_dim).to(device)
        self.optimizer  = torch.optim.Adam(self.policy.parameters(), lr=lr, betas=betas)

        # Frozen copy used to act in the environment and collect data.
        self.policy_old = ActorCritic(state_dim, action_dim, hidden_dim).to(device)
        self.policy_old.load_state_dict(self.policy.state_dict())

        self.loss       = nn.MSELoss()

    def update(self, memory):
        """Run K_epochs of PPO updates on the rollouts stored in `memory`."""
        # Discounted returns, computed backwards. A done flag resets the
        # running sum so rewards never leak across episode boundaries.
        returns = []
        running = 0
        for r, terminal in zip(reversed(memory.rewards), reversed(memory.dones)):
            if terminal:
                running = 0
            running = r + self.gamma * running
            returns.insert(0, running)

        # Raw returns are the real (reward + discounted future) payoff of
        # each action; they are normalized below because all-positive
        # targets train poorly — below-average actions should be punished.
        returns = torch.tensor(returns, dtype=torch.float32).to(device)
        returns = (returns - returns.mean()) / (returns.std() + 1e-5)

        old_states   = torch.stack(memory.states).to(device).detach()
        old_actions  = torch.stack(memory.actions).to(device).detach()
        old_logprobs = torch.stack(memory.logprobs).to(device).detach()  # recorded by policy_old at collection time

        for _ in range(self.K_epochs):  # reuse the same batch K_epochs times
            # Re-run the batch under the current policy; on the first pass
            # this matches policy_old. Yields log-probs, values, entropy.
            new_logprobs, values, entropy = self.policy.evaluate(old_states, old_actions)

            # Probability ratio pi_new/pi_old and the advantage estimate
            # (collected returns vs. the critic's value prediction).
            ratios     = torch.exp(new_logprobs - old_logprobs)
            advantages = returns - values.detach()

            unclipped  = ratios * advantages
            clipped    = torch.clamp(ratios, 1 - self.eps_clip, 1 + self.eps_clip) * advantages
            loss       = (-torch.min(unclipped, clipped)
                          + 0.5 * self.loss(values, returns)
                          - 0.01 * entropy)

            self.optimizer.zero_grad()
            loss.mean().backward()
            self.optimizer.step()

        # Sync the behavior policy with the freshly trained weights.
        self.policy_old.load_state_dict(self.policy.state_dict())


def train():
    """Train a PPO agent on LunarLander-v2.

    Collects rollouts with the frozen old policy, updates the model every
    `update_interval` environment steps, and saves the weights once the
    reward accumulated over a log window exceeds `reward_target`.
    """
    env         = gym.make('LunarLander-v2')
    state_dim   = env.observation_space.shape[0]
    logger.info(f"state dims: {env.observation_space.shape}")  # (8,)

    action_dim  = env.action_space.n                           # 4
    logger.info(f"action dim: {env.action_space.n}")

    render          = False         # visualize while training
    log_interval    = 20            # episodes between log lines
    max_episodes    = 50000         # hard cap on episodes played
    max_steps       = 350           # step cap per episode
    update_interval = 2000          # env steps collected per model update
    reward_target   = 1000          # window reward that stops training
    hidden_dim      = 64
    lr              = 0.002
    betas           = (0.9, 0.99)
    gamma           = 0.95
    K_epochs        = 4             # optimization epochs per collected batch
    eps_clip        = 0.2
    random_seed     = None

    if random_seed:
        torch.manual_seed(random_seed)
        # Newer gym/gymnasium removed env.seed(); seeding goes through
        # reset(), matching the 5-tuple step API already used below.
        env.reset(seed=random_seed)
        env.action_space.seed(random_seed)

    memory     = Memory()
    ppo        = PPO(state_dim, action_dim, hidden_dim, lr, betas, gamma, K_epochs, eps_clip)

    run_reward = 0   # reward accumulated since the last log line
    avg_length = 0   # env steps accumulated since the last log line
    timestep   = 0   # env steps collected since the last model update

    for episode in range(1, max_episodes + 1):
        state       = env.reset()[0]
        steps_taken = 0
        for step in range(max_steps):                           # play with the frozen policy
            timestep    += 1
            steps_taken += 1
            action       = ppo.policy_old.action(state, memory)  # old model collects the data
            state, reward, done, truncated, _ = env.step(action)

            # The acting policy records state/action/logprob itself; the
            # reward and the episode-boundary flag are appended here.
            # Truncation counts as a boundary too, so discounted returns
            # in PPO.update() do not leak across resets.
            memory.rewards.append(reward)
            memory.dones.append(done or truncated)

            # Train every update_interval steps instead of once per episode.
            if timestep % update_interval == 0:
                ppo.update(memory)                # also refreshes policy_old
                memory.clear()
                timestep = 0

            run_reward += reward
            if render:
                env.render()

            if done or truncated:
                break

        # Count of steps actually taken (the loop index was one short).
        avg_length += steps_taken

        if run_reward > reward_target:
            torch.save(ppo.policy.state_dict(), f"LunarLander-v2-{int(run_reward)}.pth")
            logger.warning(f"模型训练达到指定分数,保存模型并退出...")
            break

        # Emit averaged stats every log_interval episodes.
        if episode % log_interval == 0:
            avg_length = int(avg_length / log_interval)
            logger.info(f"Episode: {episode}, avg_length: {avg_length}, run_reward: {run_reward}")

            avg_length  = 0
            run_reward  = 0

def eval(step):
    """Evaluate a saved policy checkpoint.

    Loads LunarLander-v2-{step}.pth into a fresh PPO instance and plays
    `max_episodes` games, logging step counts and total reward.

    Args:
        step: suffix of the checkpoint file to load (the score at save time).
    """
    env         = gym.make('LunarLander-v2',
                           #render_mode='human'
                           )
    state_dim   = env.observation_space.shape[0]
    logger.info(f"state dims: {env.observation_space.shape}")  # (8,)

    action_dim  = env.action_space.n                           # 4
    logger.info(f"action dim: {env.action_space.n}")

    render          = False
    max_episodes    = 10
    hidden_dim      = 64
    ppo             = PPO(state_dim, action_dim, hidden_dim)

    state_dict = torch.load(f"LunarLander-v2-{step}.pth")
    ppo.policy.load_state_dict(state_dict)

    for episode in range(max_episodes):
        state   = env.reset()[0]
        steps   = 0
        rewards = 0
        while True:
            # Sample from the trained policy (stochastic evaluation).
            action_probs = ppo.policy.actor_layer(torch.from_numpy(state).float().to(device))
            dist         = Categorical(action_probs)
            action       = dist.sample().item()
            # Unpack the truncated flag as well: breaking only on
            # `terminated` can loop forever when the TimeLimit wrapper
            # cuts the episode instead of the environment terminating.
            state, reward, done, truncated, _ = env.step(action)
            steps   += 1
            rewards += reward

            if steps % 1000 == 0:
                logger.info(f"已经完了{steps}步...")
                if render:
                    env.render()

            if done or truncated:
                logger.warning(f"第{episode}次尝试结束, 共尝试{steps}次动作，获得{rewards}分奖励...")
                break

if __name__ == "__main__":
    # CLI entry point via fire; swap the commented line in to train instead
    # of evaluating a saved checkpoint.
    # fire.Fire(train)
    fire.Fire(eval)