#!/usr/bin/env python
# coding: utf-8

"""
- [PPO算法基本原理及流程图（KL penalty和Clip两种方法）](https://zhuanlan.zhihu.com/p/641099929)
- [[Deepseek v3技术报告学习] 4.GRPO](https://zhuanlan.zhihu.com/p/15922703850)

"""

# 状态价值函数:
# 
# V(state) = 所有动作求和 -> 概率(action) * Q(state,action)
# 
# 对这个式子做变形得到:
# 
# V(state) = 所有动作求和 -> 现概率(action) * [旧概率(action) / 现概率(action)] * Q(state,action)
# 
# 初始时可以认为现概率和旧概率相等,但随着模型的更新,现概率会变化.
# 
# 式子中的Q(state,action)可以用蒙特卡洛法估计.
# 
# 按照策略梯度的理论,状态价值取决于动作的质量,所以只要最大化V函数,就可以得到最好的动作策略.

# In[1]:


import gym
import numpy as np


# import matplotlib
# # matplotlib.use('TkAgg')  # 指定 TkAgg 作为后端
# matplotlib.use('Qt5Agg')  # 指定 Qt5Agg 作为后端


# --- Environment parameters
MAX_STEPS_PER_EPOCH = 1000  # NOTE(review): defined but never referenced in this file

NUM_ACTION_SPACE = 2  # CartPole-v1 action space size (push left / push right)
NUM_OBSERVATION_SPACE = 4  # CartPole-v1 observation vector length

# 定义环境
# Environment wrapper that trims gym's 5-tuple step API down to 3 values.
class MyWrapper(gym.Wrapper):
    """CartPole-v1 wrapper with a simplified reset/step interface.

    ``reset`` returns only the observation, and ``step`` returns a
    ``(state, reward, over)`` triple where ``over`` merges the
    terminated/truncated flags. Episodes are additionally force-ended once
    the step counter passes ``reward_threshold + 100`` so a run can never
    get stuck.
    """

    def __init__(self):
        base_env = gym.make('CartPole-v1', render_mode='rgb_array')
        super().__init__(base_env)
        self.env = base_env

        # Score at which the environment counts as "solved" (from the env spec).
        self.reward_threshold = self.env.spec.reward_threshold
        self.step_n = 0

    def reset(self):
        """Start a new episode and return the initial observation only."""
        self.step_n = 0
        obs, _info = self.env.reset()
        return obs

    def step(self, action):
        """Advance one step; returns (state, reward, over)."""
        obs, reward, terminated, truncated, _info = self.env.step(action)
        self.step_n += 1

        # Hard cap on episode length (threshold + 100) to avoid endless episodes.
        done = (terminated or truncated) or self.step_n >= self.reward_threshold + 100

        return obs, reward, done

    # Render the current frame with matplotlib.
    def show(self):
        from matplotlib import pyplot as plt
        plt.figure(figsize=(3, 3))
        plt.imshow(self.env.render())
        plt.show()


# Module-level singleton environment used by play()/train() below.
env = MyWrapper()

env.reset()

# env.show()

# In[2]:


import torch
import torch.nn.functional as F
from bdtime import tt


class TemperatureModel(torch.nn.Module):
    """Policy network that outputs a temperature-scaled action distribution.

    Logits are divided by a temperature before the softmax: values below 1
    sharpen the distribution, values above 1 flatten it.
    """

    def __init__(self, temperature=1.0):
        super().__init__()
        self.temperature = temperature
        layers = [
            torch.nn.Linear(NUM_OBSERVATION_SPACE, 64),
            torch.nn.ReLU(),
            torch.nn.Linear(64, 64),
            torch.nn.ReLU(),
            torch.nn.Linear(64, NUM_ACTION_SPACE),
        ]
        self.net = torch.nn.Sequential(*layers)

    def forward(self, x, temperature=None):
        """Return per-row action probabilities for a (batch, obs_dim) input."""
        logits = self.net(x)

        tau = self.temperature if temperature is None else temperature
        # Guard against division by zero for very small temperatures.
        tau = max(tau, 1e-6)
        return F.softmax(logits / tau, dim=1)


# Instantiate the policy network (actor).
model_action = TemperatureModel(temperature=1)
# ref_model_action = TemperatureModel(temperature=1)


# Critic: maps a 4-dim observation to a scalar state value V(s).
model_value = torch.nn.Sequential(
    torch.nn.Linear(NUM_OBSERVATION_SPACE, 64),
    torch.nn.ReLU(),
    # torch.nn.Linear(64, 64),
    # torch.nn.ReLU(),
    torch.nn.Linear(64, 1),
)

# model_action(torch.randn(2, 4)), model_value(torch.randn(2, 4))

# In[3]:


from IPython import display
import random


# Roll out one full episode with the current policy and record every transition.
def play(show=False):
    """Play a single episode.

    Returns ``(state, action, reward, next_state, over, total_reward)``
    where the first five are stacked tensors, one row per step, and
    ``total_reward`` is the summed episode reward as a float.
    """
    states, actions, rewards, next_states, overs = [], [], [], [], []

    s = env.reset()
    done = False
    while not done:
        # Sample an action from the policy's probability distribution.
        probs = model_action(torch.FloatTensor(s).reshape(1, 4))[0].tolist()
        a = random.choices(range(2), weights=probs, k=1)[0]

        ns, r, done = env.step(a)

        states.append(s)
        actions.append(a)
        rewards.append(r)
        next_states.append(ns)
        overs.append(done)

        s = ns

        if show:
            display.clear_output(wait=True)
            env.show()

    state_t = torch.FloatTensor(states).reshape(-1, 4)
    action_t = torch.LongTensor(actions).reshape(-1, 1)
    reward_t = torch.FloatTensor(rewards).reshape(-1, 1)
    next_state_t = torch.FloatTensor(next_states).reshape(-1, 4)
    over_t = torch.LongTensor(overs).reshape(-1, 1)

    return state_t, action_t, reward_t, next_state_t, over_t, reward_t.sum().item()


# Smoke-test: roll out one episode with the (still untrained) policy.
state, action, reward, next_state, over, reward_sum = play()

reward_sum

# In[4]:


def requires_grad(model, value):
    """Enable/disable gradient tracking for every parameter of *model*."""
    for p in model.parameters():
        p.requires_grad_(value)


# --- **Strategy configuration parameters**

flag__advantage_type = 2  # [1, 2, 3] -> [advantage straight from discounted rewards (no critic), standard PPO td_error, GRPO group-relative advantage (no critic)]
# LEARNING_RATE = 1e-2 if flag__advantage_type == 3 else 1e-3
LEARNING_RATE = 1e-3


# --- GRPO-related parameters
BETA = 0.04  # KL penalty coefficient in the GRPO loss
num_generations = 8  # group size: episodes sampled per GRPO update

PER_VALUE_TRAIN_TIMES = PER_ACTION_TRAIN_TIMES = 5  # gradient steps per collected batch
LOG_INTERVAL = 10  # evaluate and log every N epochs
TEST_TIMES = 5  # episodes averaged per evaluation

REWARD_THRESHOLD = env.reward_threshold
MAX_THRESHOLD_K = 1  # stop training after hitting `REWARD_THRESHOLD` K times in a row

MAX_STEPS = env.reward_threshold  # minimum environment steps collected per epoch
MAX_EPOCH = 500


print('--- REWARD_THRESHOLD:', REWARD_THRESHOLD, f', flag__advantage_type: {flag__advantage_type}')


# --- Statistics parameters
start_statistic_threshold = 30  # scores below this are excluded from the decline statistics


optimizer_action = torch.optim.Adam(model_action.parameters(), lr=LEARNING_RATE)
optimizer_value = torch.optim.Adam(model_value.parameters(), lr=LEARNING_RATE)


def train_value(state, reward, next_state, over):
    """Fit the critic on one-step TD targets; return detached TD errors.

    The target is ``reward + 0.98 * V(next_state) * (1 - over)``; the
    returned ``target - value`` is the baseline-subtracted advantage used
    by the policy update.
    """
    requires_grad(model_action, False)
    requires_grad(model_value, True)

    # Bootstrapped TD target; no gradient flows through V(s').
    with torch.no_grad():
        bootstrap = model_value(next_state)
    td_target = bootstrap * 0.98 * (1 - over) + reward

    # Take several gradient steps on the same batch.
    for _step in range(PER_VALUE_TRAIN_TIMES):
        predicted = model_value(state)
        mse = torch.nn.functional.mse_loss(predicted, td_target)
        mse.backward()
        optimizer_value.step()
        optimizer_value.zero_grad()

    # Subtracting the value acts as the baseline.
    return (td_target - predicted).detach()


# value = train_value(state, reward, next_state, over)
# value.shape


def get_epoch_reward(reward, over: list):
    """Sum rewards up to (and including) the first terminal step.

    *over* may be a tensor or a list of 0/1 flags; any rewards recorded
    after the first ``1`` flag are ignored.
    """
    flags = over.reshape(-1).tolist() if isinstance(over, torch.Tensor) else over

    try:
        end = flags.index(1)
    except ValueError:
        # No terminal flag present: count the whole trajectory.
        return reward.sum().item()

    return reward[:end + 1].sum().item()


def get_value_by_rewards(reward, over):
    """Monte-Carlo discounted returns (gamma = 0.98) for one trajectory.

    Walks the episode backwards, zeroing the bootstrap whenever the
    ``over`` flag is set. Returns a plain Python list, oldest step first.
    """
    r = reward[:, 0].tolist()
    done = over[:, 0].tolist()

    returns = [0.0] * len(r)
    running = 0.0
    for t in range(len(r) - 1, -1, -1):
        running = r[t] + running * 0.98 * (1 - done[t])
        returns[t] = running
    return returns


# In[6]:


current_epoch_i = 0


def train_action(state, action, td_error):
    """PPO-clip policy update on one batch of transitions.

    Parameters
    ----------
    state : FloatTensor of shape (N, 4), batch of observations.
    action : LongTensor of shape (N, 1), actions taken.
    td_error : advantage estimates; a list or tensor, coerced to (N, 1).

    Returns the last mini-epoch's loss as a float. Side effects: updates
    ``model_action`` in place and decays its sampling temperature.
    """
    requires_grad(model_action, True)
    requires_grad(model_value, False)

    # Coerce the advantage to an (N, 1) float tensor. NOTE(review): on
    # failure the exception is only printed and the raw td_error is used
    # as-is — presumably best-effort by design, but worth confirming.
    try:
        td_error = torch.FloatTensor(td_error).reshape(-1, 1)
    except Exception as e:
        print(e)

    # Action probabilities under the pre-update ("old") policy, frozen so
    # the importance ratio below has a fixed denominator.
    with torch.no_grad():
        prob_old = model_action(state).gather(dim=1, index=action)

    # Several gradient steps on the same batch (standard PPO practice).
    for _ in range(PER_ACTION_TRAIN_TIMES):
        # Probabilities under the current (updating) policy.
        prob_new = model_action(state).gather(dim=1, index=action)

        # Importance sampling ratio new/old.
        ratio = prob_new / prob_old

        # Clipped surrogate: elementwise minimum of the unclipped and
        # clipped terms (clip range [0.8, 1.2], i.e. epsilon = 0.2).
        surr1 = ratio * td_error
        surr2 = ratio.clamp(0.8, 1.2) * td_error

        loss = -torch.min(surr1, surr2).mean()
        loss.backward()
        optimizer_action.step()
        optimizer_action.zero_grad()

    # Anneal the sampling temperature (floored at 0.33) so the policy
    # gradually becomes greedier as training progresses.
    model_action.temperature *= 0.98
    model_action.temperature = max(model_action.temperature, 0.33)

    return loss.item()


def train_action_by_grpo(trajectories, advantages):
    """GRPO policy update over a group of sampled trajectories.

    Parameters
    ----------
    trajectories : list of [state, action, reward, next_state, over, epoch_reward]
        One entry per generated episode in the group.
    advantages : 1-D tensor of group-normalized advantages, one per trajectory.

    Returns a dict with per-trajectory (epoch_reward, advantage, loss)
    triples plus the final averaged batch loss. Side effect: updates
    ``model_action`` in place.

    Fixes vs. the previous version: removed a bare no-op ``advantages``
    expression statement and an unused ``prob_new_ls`` accumulator.
    """
    requires_grad(model_action, True)
    requires_grad(model_value, False)

    # Per-trajectory action probabilities under the pre-update policy;
    # these stay fixed across the inner training iterations.
    prob_old_ls = []
    for trajectory_i in trajectories:
        state, action, reward, next_state, over, epoch_reward = trajectory_i
        with torch.no_grad():
            prob_old = model_action(state).gather(dim=1, index=action)
        prob_old_ls.append(prob_old)

    # Several gradient steps on the same group of trajectories.
    for _ in range(PER_ACTION_TRAIN_TIMES):
        loss_ls = []
        for i, trajectory_i in enumerate(trajectories):
            state, action, reward, next_state, over, epoch_reward = trajectory_i

            # Probabilities under the current (updating) policy.
            prob_new = model_action(state).gather(dim=1, index=action)

            # Importance ratio against the frozen pre-update policy.
            ratio = prob_new / prob_old_ls[i]

            # PPO-style clipped surrogate; one scalar advantage per trajectory.
            surr1 = ratio * advantages[i]
            surr2 = ratio.clamp(0.8, 1.2) * advantages[i]
            per_token_loss = torch.min(surr1, surr2)

            # KL estimate r - log(r) - 1 (always >= 0), as used by trl's GRPO.
            per_token_kl = ratio - torch.log(ratio) - 1

            loss_i = (-(per_token_loss - BETA * per_token_kl)).mean()
            loss_ls.append(loss_i)

        # Average the per-trajectory losses and take one optimizer step.
        loss = sum(loss_ls) / len(loss_ls)
        loss.backward()
        optimizer_action.step()
        optimizer_action.zero_grad()

    # Diagnostics: per-trajectory total reward, advantage and final loss.
    epoch_reward = [sum(trajectory_i[2]) for trajectory_i in trajectories]
    res = [[round(reward_i.detach().item(), 2), round(advantage_i.detach().item(), 2), round(loss_i.detach().item(), 3)] for loss_i, reward_i, advantage_i in zip(loss_ls, epoch_reward, advantages)]

    res = {"epoch_r, advantage, loss": res, "batch_loss": round(loss.detach().item(), 6)}
    return res


# train_action(state, action, value)

# In[7]:

scores = []


def train():
    """Main training loop: collect experience and update the policy.

    Depending on ``flag__advantage_type``, the advantage is estimated from
    raw discounted returns (1), the learned critic's TD error (2), or
    GRPO group-normalized episode rewards (otherwise). Evaluates every
    ``LOG_INTERVAL`` epochs and stops early once the evaluation score
    reaches ``REWARD_THRESHOLD`` ``MAX_THRESHOLD_K`` times in a row.
    """
    global current_epoch_i

    model_action.train()
    model_value.train()

    threshold_k = 0
    total_steps = 0
    # Train for at most MAX_EPOCH epochs.
    for epoch in range(MAX_EPOCH):
        # Each epoch collects at least MAX_STEPS environment steps.
        current_epoch_i = epoch
        steps = 0

        while steps < MAX_STEPS:
            if flag__advantage_type == 1:
                # --- Advantage straight from discounted rewards (no critic).
                state, action, reward, next_state, over, _ = play()
                steps += len(state)
                value = get_value_by_rewards(reward, over)
                td_error = value

            elif flag__advantage_type == 2:
                # --- Standard PPO: TD error from the learned critic.
                state, action, reward, next_state, over, _ = play()
                steps += len(state)
                td_error = train_value(state, reward, next_state, over)
            else:
                # --- GRPO: sample a group of episodes and normalize their
                # total rewards within the group (no critic).
                trajectories = []
                for i in range(num_generations):
                    with torch.no_grad():
                        state, action, reward, next_state, over, _ = play()
                        steps += len(state)
                        epoch_reward = get_epoch_reward(reward, over)
                        trajectories.append([state, action, reward, next_state, over, epoch_reward])

                rewards = [_t_i[-1] for _t_i in trajectories]
                rewards = torch.FloatTensor(rewards)

                # Group-relative advantage: (r - mean) / (std + eps).
                mean_grouped_rewards = rewards.view(-1, num_generations).mean(dim=1)
                std_grouped_rewards = rewards.view(-1, num_generations).std(dim=1)
                mean_grouped_rewards = mean_grouped_rewards.repeat_interleave(num_generations, dim=0)
                std_grouped_rewards = std_grouped_rewards.repeat_interleave(num_generations, dim=0)
                advantages = (rewards - mean_grouped_rewards) / (std_grouped_rewards + 1e-4) if num_generations > 1 else rewards

            # NOTE(review): this test is `!= 3` while the branch above is a
            # bare `else`; any flag value outside {1, 2, 3} would reach
            # train_action with td_error undefined — confirm the flag is
            # always one of 1/2/3.
            if flag__advantage_type != 3:
                loss = train_action(state, action, td_error)
            else:
                loss = train_action_by_grpo(trajectories, advantages)

        total_steps += steps

        # Periodic evaluation: average episode reward over TEST_TIMES rollouts.
        if epoch % LOG_INTERVAL == 0:
            with torch.no_grad():
                score = sum([play()[-1] for _ in range(TEST_TIMES)]) / TEST_TIMES
                score = round(score, 3)
                scores.append(score)
            print('=== epoch:', epoch, ", score:", score, " --- total_steps:", total_steps, f", now: {tt.now()}", "--- loss:", loss,)

            if score >= REWARD_THRESHOLD:
                threshold_k += 1
                if threshold_k >= MAX_THRESHOLD_K:
                    print(f'===== already achieve the reward_threshold [{REWARD_THRESHOLD}] for [{threshold_k}] times in a row, stop training, now: {tt.now()}')
                    break
                else:
                    print(f"--- achieve the reward_threshold [{REWARD_THRESHOLD}], threshold_k [{threshold_k} / {MAX_THRESHOLD_K}]")
            else:
                threshold_k = 0


train()

# In[8]:

# scores = [1, 2, 3, 5, 10, 20]

# Post-training statistics over the evaluation scores: how often, and by how
# much, the score declined between consecutive evaluations.
np.set_printoptions(suppress=True)
scores = np.array(scores)

# Only analyse the curve after scores first exceed the threshold; earlier
# scores fluctuate too wildly to be meaningful.
# Fix: guard against no score ever exceeding the threshold, which previously
# raised IndexError on the empty np.where result; fall back to the full curve.
above_threshold = np.where(scores > start_statistic_threshold)[0]
start_statistic_index = above_threshold[0] if above_threshold.size > 0 else 0
scores = scores[start_statistic_index:]
delta_scores = scores[1:] - scores[:-1]

# Relative change between consecutive evaluation scores.
rates = np.round(delta_scores / scores[:-1], 3)

decline_scores: np.ndarray = delta_scores[delta_scores < 0]
# Indexes (in the original scores array) where a decline happened.
decline_indexes = np.where(delta_scores < 0)[0] + start_statistic_index

if decline_scores.size > 0:
    decline_rates = rates[delta_scores < 0]

    min_decline_rate_value = np.min(decline_rates)
    min_decline_rate_index = np.where(decline_rates == min_decline_rate_value)[0][0]

    rate_times = len(decline_scores) / len(scores)
    print(f'----- decline_scores.length / scores.length: {len(decline_scores)} / {len(scores)} == {rate_times: .2%}')
    print(f'--- decline_rates length: [{len(decline_rates)}] --- mean(%): {round(np.mean(decline_rates), 3): .2%},'
          f' std(%): {round(np.std(decline_rates), 3): .2%} --- min_decline_rate.value: {min_decline_rate_value: .2%}, index: {min_decline_rate_index}')
    print(f'--- decline_scores length: [{len(decline_scores)}] --- mean: {round(np.mean(decline_scores), 3)}, std: {round(np.std(decline_scores), 3)}, min: {np.min(decline_scores)}')
    print(f'--- decline_rates(%): {decline_rates * 100} %')
    print(f'--- decline_scores: {decline_scores}')
    print(f'--- decline_indexes: {decline_indexes}')

# import numpy as np
# import matplotlib.pyplot as plt
#
#
# def moving_average(data, window_size=10):
#     return np.convolve(data, np.ones(window_size)/window_size, mode='valid')
#
#
# scores = np.arange(1, 20)
# # scores = [...]  # 你的得分数据
# window_size = 10
# smooth_scores = moving_average(scores, window_size)
#
# plt.plot(scores, label="Raw Scores")
# plt.plot(range(window_size-1, len(scores)), smooth_scores, label=f"Moving Average (window={window_size})")
# plt.legend()
# plt.show()

# play(True)[-1]
