#!/usr/bin/env python3
'''
python .\train_ddpg_simple.py -n caracing
验证未通过

训练记录：
20241010：测试分数上涨至-82+分，训练分数在-82分，继续训练看看，感觉提升不多，是否需要调整网络结构
20241011：调整了网路结构后，训练一直没有进展，暂停训练
'''

import os
import ptan
import time
import gymnasium as gym
import argparse
from tensorboardX import SummaryWriter
import numpy as np
from collections import deque

from lib import model, common

import torch
import torch.optim as optim
import torch.nn.functional as F


ENV_ID = "CarRacing-v2"
GAMMA = 0.99
BATCH_SIZE = 128
LEARNING_RATE = 1e-4
REPLAY_SIZE = 100000 # replay buffer capacity; kept large to improve training stability
REPLAY_INITIAL = 10000 # minimum number of transitions in the buffer before training starts

TEST_ITERS = 1000 # run an evaluation (and checkpoint) every this many frames

class TransposeObservation(gym.ObservationWrapper):
    """Convert image observations from HWC to CHW layout (PyTorch convention)."""

    def __init__(self, env=None):
        super().__init__(env)

    def observation(self, observation):
        # (H, W, C) -> (C, H, W); moveaxis(obs, 2, 0) == obs.transpose(2, 0, 1) here
        return np.moveaxis(observation, 2, 0)
    
class FrameStackWrapper(gym.Wrapper):
    """Stack the last ``n_frames`` observations along the last (channel) axis.

    The advertised observation space is widened so its channel dimension is
    ``n_frames`` times the wrapped environment's.
    """

    def __init__(self, env, n_frames=4):
        super().__init__(env)
        self.n_frames = n_frames
        # deque drops the oldest frame automatically once full
        self.frames = deque([], maxlen=n_frames)

        # Assumes the wrapped observation space is a Box; widen its bounds
        # along the channel (last) axis to match the stacked observation.
        self.observation_space = gym.spaces.Box(
            low=np.repeat(env.observation_space.low, n_frames, axis=-1),
            high=np.repeat(env.observation_space.high, n_frames, axis=-1),
            dtype=env.observation_space.dtype
        )

    def _get_observation(self):
        # Concatenate the buffered frames along the channel axis
        return np.concatenate(list(self.frames), axis=-1)

    def reset(self, **kwargs):
        """Reset the env and pre-fill the buffer with copies of the first frame."""
        obs, info = self.env.reset(**kwargs)
        for _ in range(self.n_frames):
            self.frames.append(obs)
        return self._get_observation(), info

    def step(self, action):
        """Step the env and return the stacked observation."""
        obs, reward, terminated, truncated, info = self.env.step(action)
        self.frames.append(obs)
        return self._get_observation(), reward, terminated, truncated, info
    

def wrap_dqn(env, stack_frames=4, episodic_life=True, reward_clipping=True):
    """Apply the preprocessing wrappers used by this script.

    Stacks ``stack_frames`` frames along the channel axis, then transposes
    observations to CHW layout. ``episodic_life`` and ``reward_clipping`` are
    accepted for signature compatibility but are currently ignored.
    """
    stacked = FrameStackWrapper(env, n_frames=stack_frames)
    return TransposeObservation(stacked)


def test_net(net, env, count=10, device="cpu"):
    """Play ``count`` full episodes with the deterministic policy.

    Args:
        net: actor-critic network; called with a batch of observations it
            returns ``(mu, q)`` where ``mu`` is the predicted action.
        env: environment to evaluate in.
        count: number of episodes, each played to termination/truncation.
        device: torch device for the forward passes.

    Returns:
        Tuple ``(mean_reward, mean_steps)`` averaged over the episodes.
    """
    total_reward = 0.0
    total_steps = 0
    for _ in range(count):
        state, _ = env.reset()
        episode_over = False
        while not episode_over:
            state_v = ptan.agent.float32_preprocessor([state]).to(device)
            # deterministic action from the actor head, clipped to valid range
            mu_v, _ = net(state_v)
            act = np.clip(mu_v.squeeze(dim=0).data.cpu().numpy(), -1, 1)
            state, reward, terminated, truncated, _ = env.step(act)
            total_reward += reward
            total_steps += 1
            episode_over = terminated or truncated
    return total_reward / count, total_steps / count


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # NOTE(review): with default=True this store_true flag cannot actually be
    # disabled from the command line; kept as-is for backward compatibility.
    parser.add_argument("--cuda", default=True, action='store_true', help='Enable CUDA')
    parser.add_argument("-n", "--name", required=True, help="Name of the run")
    args = parser.parse_args()
    # Fall back to CPU when CUDA was requested but is not available.
    device = torch.device("cuda" if args.cuda and torch.cuda.is_available() else "cpu")

    save_path = os.path.join("saves", "ddpg-simple-" + args.name)
    os.makedirs(save_path, exist_ok=True)

    env = wrap_dqn(gym.make(ENV_ID, domain_randomize=True, continuous=True))
    test_env = wrap_dqn(gym.make(ENV_ID, continuous=True))

    # Build the combined actor/critic network.
    act_crt_net = model.DDPGActorSimple(env.observation_space.shape, env.action_space.shape[0]).to(device)
    print(act_crt_net)

    # A separate target network provides a stable regression target for the
    # Q-value head: updating it slowly prevents the critic from chasing its
    # own noisy recent estimates (occasional high/low scores).
    tgt_act_crt_net = ptan.agent.TargetNet(act_crt_net)

    writer = SummaryWriter(comment="-simple-ddpg_" + args.name)
    # Build the DDPG agent, experience source and replay buffer.
    agent = model.AgentDDPGSimple(act_crt_net, device=device)
    exp_source = ptan.experience.ExperienceSourceFirstLast(env, agent, gamma=GAMMA, steps_count=1)
    buffer = ptan.experience.ExperienceReplayBuffer(exp_source, buffer_size=REPLAY_SIZE)
    act_opt = optim.Adam(act_crt_net.actor_params(), lr=LEARNING_RATE)
    crt_opt = optim.Adam(act_crt_net.qval_params(), lr=LEARNING_RATE)

    frame_idx = 0
    # Resume from the most recent checkpoint, if any exists.
    if os.path.exists(save_path) and len(os.listdir(save_path)) > 0:
        checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(save_path)),
                             key=lambda x: int(x.split('_')[2].split('.')[0]))
        if len(checkpoints) > 0:
            checkpoint = torch.load(os.path.join(save_path, checkpoints[-1]), map_location=device, weights_only=False)
            act_crt_net.load_state_dict(checkpoint['act_crt_net'])
            tgt_act_crt_net.target_model.load_state_dict(checkpoint['tgt_act_crt_net'])
            act_opt.load_state_dict(checkpoint['act_opt'])
            crt_opt.load_state_dict(checkpoint['crt_opt'])
            frame_idx = checkpoint['frame_idx']
            print("加载模型成功")

    with ptan.common.utils.RewardTracker(writer) as tracker:
        with ptan.common.utils.TBMeanTracker(writer, batch_size=10) as tb_tracker:
            while True:
                frame_idx += 1
                # Collect one environment step into the replay buffer.
                buffer.populate(1)
                # Drain any finished-episode statistics collected so far.
                rewards_steps = exp_source.pop_rewards_steps()
                if rewards_steps:
                    # Log training progress for the most recent episode.
                    rewards, steps = zip(*rewards_steps)
                    tb_tracker.track("episode_steps", steps[0], frame_idx)
                    tracker.reward(rewards[0], frame_idx)

                # Wait until the buffer holds enough transitions to train on.
                if len(buffer) < REPLAY_INITIAL:
                    continue

                batch = buffer.sample(BATCH_SIZE)
                states_v, actions_v, rewards_v, dones_mask, last_states_v = common.unpack_batch_ddqn(batch, device)

                # --- critic update ---
                crt_opt.zero_grad()
                act_opt.zero_grad()
                # Q-value of the actually-taken actions from the replayed batch.
                _, q_v = act_crt_net(states_v, actions_v)
                # Target network predicts the next action and its Q-value.
                last_act_v, q_last_v = tgt_act_crt_net.target_model(last_states_v)
                # Terminal states contribute no future value.
                q_last_v[dones_mask.bool()] = 0.0
                # One-step Bellman target.
                q_ref_v = rewards_v.unsqueeze(dim=-1) + q_last_v * GAMMA
                # MSE between predicted Q and the (detached) Bellman target.
                critic_loss_v = F.mse_loss(q_v, q_ref_v.detach())
                critic_loss_v.backward()
                crt_opt.step()

                tb_tracker.track("loss_critic", critic_loss_v, frame_idx)
                tb_tracker.track("critic_ref", q_ref_v.mean(), frame_idx)

                # --- actor update ---
                # Clear gradients the critic backward pass may have left in the
                # actor parameters before the policy-gradient step; otherwise
                # they would be accumulated into act_opt.step().
                act_opt.zero_grad()
                cur_actions_v, cur_actions_qvals = act_crt_net(states_v)
                # Maximize Q(s, mu(s)) by descending its negated mean.
                actor_loss_v = -cur_actions_qvals.mean()
                actor_loss_v.backward()
                act_opt.step()
                tb_tracker.track("loss_actor", actor_loss_v, frame_idx)

                # Soft-update the target network every step with a small step
                # size instead of a hard sync every N steps; empirically this
                # gives smoother, more stable updates.
                tgt_act_crt_net.alpha_sync(alpha=1 - 1e-3)

                if frame_idx % TEST_ITERS == 0:
                    # Periodic evaluation + best-model saving + checkpointing.
                    ts = time.time()
                    rewards, steps = test_net(act_crt_net, test_env, device=device)
                    print("Test done in %.2f sec, reward %.3f, steps %d" % (
                        time.time() - ts, rewards, steps))
                    # NOTE(review): "a2c-best" looks like a copy-paste name from
                    # an A2C script; kept so previously saved best-model files
                    # remain compatible with this run.
                    common.save_best_model(rewards, act_crt_net.state_dict(), save_path, "a2c-best", keep_best=10)
                    writer.add_scalar("test_reward", rewards, frame_idx)
                    writer.add_scalar("test_steps", steps, frame_idx)
                    checkpoint = {
                        "act_crt_net": act_crt_net.state_dict(),
                        "tgt_act_crt_net": tgt_act_crt_net.target_model.state_dict(),
                        "act_opt": act_opt.state_dict(),
                        "crt_opt": crt_opt.state_dict(),
                        "frame_idx": frame_idx
                    }
                    common.save_checkpoints(frame_idx, checkpoint, save_path, "ddpg-simple", keep_last=5)
