#!/usr/bin/env python
# coding: utf-8

# 状态价值函数:
# 
# V(state) = 所有动作求和 -> 概率(action) * Q(state,action)
# 
# 对这个式子做变形得到:
# 
# V(state) = 所有动作求和 -> 现概率(action) * [旧概率(action) / 现概率(action)] * Q(state,action)
# 
# 初始时可以认为现概率和旧概率相等,但随着模型的更新,现概率会变化.
# 
# 式子中的Q(state,action)可以用蒙特卡洛法估计.
# 
# 按照策略梯度的理论,状态价值取决于动作的质量,所以只要最大化V函数,就可以得到最好的动作策略.

# In[1]:


import gym


# import matplotlib
# # matplotlib.use('TkAgg')  # 指定 TkAgg 作为后端
# matplotlib.use('Qt5Agg')  # 指定 Qt5Agg 作为后端


# 定义环境
# Environment wrapper: CartPole-v1 with a hard step cap and a failure penalty.
class MyWrapper(gym.Wrapper):

    def __init__(self):
        env = gym.make('CartPole-v1', render_mode='rgb_array')
        super().__init__(env)
        self.env = env

        # Step cap; an episode that terminates before reaching it is a failure.
        self.reward_threshold = 200
        self.step_n = 0

    def reset(self):
        """Reset the episode and return only the observation (drops the info dict)."""
        obs, _ = self.env.reset()
        self.step_n = 0
        return obs

    def step(self, action):
        """Advance one step; returns (state, reward, over) with custom termination.

        `over` is True when gym reports terminated/truncated or when the
        step cap is reached; ending before the cap yields reward -100.
        """
        obs, reward, terminated, truncated, info = self.env.step(action)
        done = terminated or truncated

        # Enforce the maximum number of steps per episode.
        self.step_n += 1
        if self.step_n >= self.reward_threshold:
            done = True

        # Penalize episodes that failed to last until the cap.
        if done and self.step_n < self.reward_threshold:
            reward = -100

        return obs, reward, done

    def show(self):
        """Render the current frame with matplotlib."""
        from matplotlib import pyplot as plt
        plt.figure(figsize=(3, 3))
        plt.imshow(self.env.render())
        plt.show()


# Instantiate the wrapped environment and reset once so `env` is ready to step.
env = MyWrapper()

env.reset()

# env.show()

# In[2]:


import torch
import torch.nn.functional as F


class TemperatureModel(torch.nn.Module):
    """Policy network: maps a 4-d state to a softmax over 2 actions.

    Logits are divided by a temperature before the softmax; higher
    temperature flattens the distribution, lower sharpens it.
    """

    def __init__(self, temperature=1.0):
        super().__init__()
        self.temperature = temperature
        layers = [
            torch.nn.Linear(4, 64),
            torch.nn.ReLU(),
            torch.nn.Linear(64, 64),
            torch.nn.ReLU(),
            torch.nn.Linear(64, 2),
        ]
        self.net = torch.nn.Sequential(*layers)

    def forward(self, x, temperature=None):
        """Return (batch, 2) action probabilities for input `x` of shape (batch, 4)."""
        logits = self.net(x)

        # Fall back to the stored temperature when no override is given.
        t = self.temperature if temperature is None else temperature
        # Guard against zero/negative temperatures.
        t = max(t, 1e-6)
        return F.softmax(logits / t, dim=1)


# Instantiate the action (policy) model.
model_action = TemperatureModel(temperature=1)


# Earlier plain-Sequential definition of the policy, kept for reference:
# model_action = torch.nn.Sequential(
#     torch.nn.Linear(4, 64),
#     torch.nn.ReLU(),
#     torch.nn.Linear(64, 64),
#     torch.nn.ReLU(),
#     torch.nn.Linear(64, 2),
#     torch.nn.Softmax(dim=1),
# )


# Value model: maps a 4-d state to a scalar state value V(s).
model_value = torch.nn.Sequential(
    torch.nn.Linear(4, 64),
    torch.nn.ReLU(),
    torch.nn.Linear(64, 64),
    torch.nn.ReLU(),
    torch.nn.Linear(64, 1),
)

# model_action(torch.randn(2, 4)), model_value(torch.randn(2, 4))

# In[3]:


from IPython import display
import random


# 玩一局游戏并记录数据
# Play one episode with the current policy and record every transition.
def play(show=False):
    """Roll out one episode; returns (state, action, reward, next_state, over, total_reward)."""
    states, actions, rewards, next_states, overs = [], [], [], [], []

    s = env.reset()
    done = False
    while not done:
        # Sample an action from the policy's output distribution.
        probs = model_action(torch.FloatTensor(s).reshape(1, 4))[0].tolist()
        a = random.choices(range(2), weights=probs, k=1)[0]

        ns, r, done = env.step(a)

        states.append(s)
        actions.append(a)
        rewards.append(r)
        next_states.append(ns)
        overs.append(done)

        s = ns

        if show:
            display.clear_output(wait=True)
            env.show()

    # Pack the trajectory into batched tensors.
    state = torch.FloatTensor(states).reshape(-1, 4)
    action = torch.LongTensor(actions).reshape(-1, 1)
    reward = torch.FloatTensor(rewards).reshape(-1, 1)
    next_state = torch.FloatTensor(next_states).reshape(-1, 4)
    over = torch.LongTensor(overs).reshape(-1, 1)

    return state, action, reward, next_state, over, reward.sum().item()


# Smoke-test: roll out one episode and inspect the total reward.
state, action, reward, next_state, over, reward_sum = play()

reward_sum

# In[4]:


# Separate Adam optimizers for the policy and value networks.
optimizer_action = torch.optim.Adam(model_action.parameters(), lr=1e-3)
optimizer_value = torch.optim.Adam(model_value.parameters(), lr=1e-3)


def requires_grad(model, value):
    """Enable or disable gradient tracking for every parameter of `model`."""
    flag = bool(value)
    for p in model.parameters():
        p.requires_grad = flag


# In[5]:
# Minimum environment steps to collect per training epoch (reuses the env's step cap).
MAX_STEPS = env.reward_threshold

# Gradient updates per batch for each model.
PER_VALUE_TRAIN_TIMES = PER_ACTION_TRAIN_TIMES = 1
LOG_INTERVAL = 10

# REWARD_THRESHOLD = 195
# Early-stop once the average evaluation reward reaches 90% of the step cap.
REWARD_THRESHOLD = env.reward_threshold * 0.9
MAX_THRESHOLD_K = 3


def train_value(state, reward, next_state, over):
    """One TD update of the value net; returns the detached advantage (target - value)."""
    requires_grad(model_action, False)
    requires_grad(model_value, True)

    # Bootstrapped TD target: r + 0.98 * V(s') for non-terminal steps.
    with torch.no_grad():
        target = model_value(next_state)
    target = target * 0.98 * (1 - over) + reward

    # Fit V(s) to the target PER_VALUE_TRAIN_TIMES times on this batch.
    for _ in range(PER_VALUE_TRAIN_TIMES):
        value = model_value(state)

        loss = F.mse_loss(value, target)
        loss.backward()
        optimizer_value.step()
        optimizer_value.zero_grad()

    # Subtracting the value estimate acts as a baseline.
    return (target - value).detach()


# Smoke-test the value update on the rollout collected above.
value = train_value(state, reward, next_state, over)

value.shape


# In[6]:


current_epoch_i = 0


def train_action(state, action, value):
    """One PPO (clipped-surrogate) update of the policy network.

    state:  (n, 4) float tensor of observations.
    action: (n, 1) long tensor of actions taken.
    value:  (n, 1) tensor of per-step advantages from the value net.

    Returns the last policy loss as a float.
    """
    requires_grad(model_action, True)
    requires_grad(model_value, False)

    # Discounted advantage-to-go (gamma * lambda = 0.98 * 0.95):
    #   delta[i] = sum_{j>=i} value[j] * (0.98*0.95)^(j-i)
    # Computed with a single backward pass instead of the original O(n^2)
    # double loop, accumulating plain floats rather than 1-element tensors.
    g = 0.98 * 0.95
    returns = []
    running = 0.0
    for v in reversed(value.flatten().tolist()):
        running = v + g * running
        returns.append(running)
    returns.reverse()
    delta = torch.FloatTensor(returns).reshape(-1, 1)

    # Action probabilities before the update (the "old" policy).
    with torch.no_grad():
        prob_old = model_action(state).gather(dim=1, index=action)

    # Train the policy PER_ACTION_TRAIN_TIMES times on this batch.
    for _ in range(PER_ACTION_TRAIN_TIMES):
        # Action probabilities under the current (updating) policy.
        prob_new = model_action(state).gather(dim=1, index=action)

        # Probability ratio between the new and old policies.
        ratio = prob_new / prob_old

        # Clipped surrogate objective: take the pessimistic (smaller) term.
        surr1 = ratio * delta
        surr2 = ratio.clamp(0.8, 1.2) * delta

        loss = -torch.min(surr1, surr2).mean()
        loss.backward()
        optimizer_action.step()
        optimizer_action.zero_grad()

    # Anneal the softmax temperature toward a floor of 0.33 to sharpen
    # the policy as training progresses.
    model_action.temperature *= 0.98
    model_action.temperature = max(model_action.temperature, 0.33)

    return loss.item()


# Smoke-test one policy update on the rollout from above.
train_action(state, action, value)

# In[7]:


def train():
    """Main training loop: alternate rollouts with value/policy updates.

    Runs up to 1000 epochs. Every LOG_INTERVAL epochs the policy is
    evaluated over 20 episodes; training stops early once the average
    reward reaches REWARD_THRESHOLD on MAX_THRESHOLD_K consecutive
    evaluations.
    """
    model_action.train()
    model_value.train()

    # Consecutive evaluations above the reward threshold.
    threshold_k = 0
    for epoch in range(1000):
        # Collect at least MAX_STEPS environment steps per epoch.
        steps = 0
        while steps < MAX_STEPS:
            state, action, reward, next_state, over, _ = play()
            steps += len(state)

            # Update both networks on this rollout.
            delta = train_value(state, reward, next_state, over)
            loss = train_action(state, action, delta)

        if epoch % LOG_INTERVAL == 0:
            # Evaluate: average total reward over 20 fresh rollouts.
            with torch.no_grad():
                test_result = sum([play()[-1] for _ in range(20)]) / 20
            print(epoch, round(loss, 4), test_result)

            if test_result >= REWARD_THRESHOLD:
                threshold_k += 1
                if threshold_k >= MAX_THRESHOLD_K:
                    print(f'===== already achieve the reward_threshold [{REWARD_THRESHOLD}] for [{threshold_k}] times in a row, stop training')
                    break
                else:
                    print(f"--- achieve the reward_threshold [{REWARD_THRESHOLD}], threshold_k [{threshold_k} / {MAX_THRESHOLD_K}]")
            else:
                threshold_k = 0


# Run the full training loop.
train()

# In[8]:


# play(True)[-1]
