#!/usr/bin/env python3
'''
Cross-entropy method on FrozenLake: play many episodes with a stochastic
(random-ish) policy, keep only the best-scoring episodes, and repeatedly
fit the neural network to the actions taken in those elite episodes.
'''
from collections import namedtuple

# NOTE: the original `import gymnasium as gym, gym.spaces` first bound `gym`
# to gymnasium and then RE-bound it to the legacy `gym` package (imported by
# `import gym.spaces`), silently shadowing gymnasium for the whole file.
# Import gymnasium only; its `spaces` submodule is reachable via the alias.
import gymnasium as gym
import gymnasium.spaces
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from tensorboardX import SummaryWriter

# An "episode" is one full game, from reset() until the terminal state.
HIDDEN_SIZE = 128
# 16 episodes per training batch: roughly 1 in 100 random episodes succeeds
# on FrozenLake, so batches are kept small and filtered aggressively.
BATCH_SIZE = 16
# Episodes with a total reward below this percentile are discarded.
PERCENTILE = 70


class DiscreteOneHotWrapper(gym.ObservationWrapper):
    '''
    Observation wrapper that re-encodes a Discrete observation as a one-hot
    float32 vector, which a dense network can consume directly.

    Implemented with the decorator (wrapper) pattern around the inner env.
    '''
    def __init__(self, env):
        super().__init__(env)
        # One-hot encoding only makes sense for a discrete state space.
        assert isinstance(env.observation_space, gym.spaces.Discrete)
        n = env.observation_space.n
        # Advertise the transformed space: an n-dimensional unit box.
        self.observation_space = gym.spaces.Box(0.0, 1.0, (n, ), dtype=np.float32)

    def observation(self, observation):
        # Start from the all-zeros vector (the Box lower bound) and flip
        # the slot of the current discrete state to one.
        encoded = np.copy(self.observation_space.low)
        encoded[observation] = 1.0
        return encoded


class Net(nn.Module):
    '''
    Two-layer MLP mapping an observation vector to raw action logits.

    The output is deliberately unnormalised: callers apply Softmax when
    sampling actions, and CrossEntropyLoss applies it internally in training.
    '''
    def __init__(self, obs_size, hidden_size, n_actions):
        super().__init__()
        layers = [
            nn.Linear(obs_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, n_actions),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)


# One full game (episode): its total undiscounted reward plus all its steps.
Episode = namedtuple('Episode', field_names=['reward', 'steps'])
# A single transition: the observation seen and the action taken on it.
EpisodeStep = namedtuple('EpisodeStep', field_names=['observation', 'action'])


def iterate_batches(env, net, batch_size):
    '''
    Play the environment forever, sampling actions from the network's softmax
    policy, and yield completed episodes in batches of ``batch_size``.

    :param env: gymnasium environment (new API: ``reset()`` returns
        ``(obs, info)``, ``step()`` returns a 5-tuple)
    :param net: policy network mapping a batch of observations to action logits
    :param batch_size: number of finished episodes per yielded batch
    :return: generator yielding lists of ``batch_size`` Episode tuples
    '''
    batch = []
    episode_reward = 0.0
    episode_steps = []
    # BUGFIX: gymnasium's reset() returns (observation, info), not a bare
    # observation — unpack and discard the info dict.
    obs, _ = env.reset()
    sm = nn.Softmax(dim=1)
    while True:
        # np.asarray avoids torch's slow list-of-ndarray conversion path.
        obs_v = torch.FloatTensor(np.asarray([obs]))
        act_probs_v = sm(net(obs_v))
        act_probs = act_probs_v.data.numpy()[0]
        # Sample from the policy distribution (not argmax) to keep exploring.
        action = np.random.choice(len(act_probs), p=act_probs)
        # BUGFIX: gymnasium's step() returns a 5-tuple; the episode ends
        # when it is terminated (terminal state) OR truncated (time limit).
        next_obs, reward, terminated, truncated, _ = env.step(action)
        episode_reward += reward
        episode_steps.append(EpisodeStep(observation=obs, action=action))
        if terminated or truncated:
            batch.append(Episode(reward=episode_reward, steps=episode_steps))
            episode_reward = 0.0
            episode_steps = []
            next_obs, _ = env.reset()
            if len(batch) == batch_size:
                yield batch
                batch = []
        obs = next_obs


def filter_batch(batch, percentile):
    '''
    Select the "elite" episodes from a batch for training.

    :param batch: list of Episode tuples produced by iterate_batches
    :param percentile: reward percentile used as the cutoff (via
        np.percentile); episodes with total reward below it are dropped
    :return: (observation tensor, action tensor, reward cutoff,
        mean reward of the UNFILTERED batch)
    '''
    rewards = [episode.reward for episode in batch]
    reward_bound = np.percentile(rewards, percentile)
    reward_mean = float(np.mean(rewards))

    # Keep episodes at or above the cutoff and flatten their steps into
    # parallel observation/action training lists.
    elite = [episode for episode in batch if episode.reward >= reward_bound]
    train_obs = [step.observation for episode in elite for step in episode.steps]
    train_act = [step.action for episode in elite for step in episode.steps]

    train_obs_v = torch.FloatTensor(train_obs)
    train_act_v = torch.LongTensor(train_act)
    return train_obs_v, train_act_v, reward_bound, reward_mean


if __name__ == "__main__":
    env = DiscreteOneHotWrapper(gym.make("FrozenLake-v1"))
    # env = gym.wrappers.Monitor(env, directory="mon", force=True)
    obs_size = env.observation_space.shape[0]
    n_actions = env.action_space.n

    net = Net(obs_size, HIDDEN_SIZE, n_actions)
    objective = nn.CrossEntropyLoss()
    optimizer = optim.Adam(params=net.parameters(), lr=0.01)
    writer = SummaryWriter(comment="-frozenlake-naive")

    for iter_no, batch in enumerate(iterate_batches(env, net, BATCH_SIZE)):
        '''
        batch: 当前一局游戏的所有操作
        '''
        obs_v, acts_v, reward_b, reward_m = filter_batch(batch, PERCENTILE)
        optimizer.zero_grad()
        action_scores_v = net(obs_v)
        '''
        这个例子中的损失，主要是利用过滤出样本中回报奖励高的数据去训练
        计算当前网路输出的预测动作值与过滤出来的高奖励的动作值之间的差异去训练模型
        '''
        loss_v = objective(action_scores_v, acts_v)
        loss_v.backward()
        optimizer.step()
        print("%d: loss=%.3f, reward_mean=%.1f, reward_bound=%.1f" % (
            iter_no, loss_v.item(), reward_m, reward_b))
        writer.add_scalar("loss", loss_v.item(), iter_no)
        writer.add_scalar("reward_bound", reward_b, iter_no)
        writer.add_scalar("reward_mean", reward_m, iter_no)
        if reward_m > 0.8:
            print("Solved!")
            break
    writer.close()
