"""
单线程ppo
"""
from bdtime import tt
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.distributions.categorical import Categorical
from torch.utils.tensorboard import SummaryWriter
import os
import random
import time
import argparse
from distutils.util import strtobool
from tools.remove_temp_file import remove_path
from torch.nn import functional as F
from torchvision import transforms
from PIL import Image


os.environ["SDL_VIDEODRIVER"] = "dummy"


def parse_args(argv=None):
    """Parse command-line arguments for the PPO experiment.

    Args:
        argv: optional list of argument strings; defaults to ``sys.argv[1:]``.
            Passing an explicit list makes the function testable without
            touching the real command line (backward compatible: calling
            ``parse_args()`` behaves exactly as before).

    Returns:
        argparse.Namespace with the parsed options plus two derived fields:
        ``batch_size`` (num_envs * num_steps) and ``minibatch_size``
        (batch_size // num_minibatches).
    """
    def str2bool(x):
        # Local replacement for the deprecated distutils.util.strtobool
        # (distutils was removed in Python 3.12); accepts the same spellings.
        v = str(x).strip().lower()
        if v in ("y", "yes", "t", "true", "on", "1"):
            return True
        if v in ("n", "no", "f", "false", "off", "0"):
            return False
        raise argparse.ArgumentTypeError(f"invalid boolean value: {x!r}")

    # fmt: off
    parser = argparse.ArgumentParser()
    # os.path.splitext strips exactly the ".py" suffix; the previous
    # rstrip(".py") removed any trailing '.', 'p', 'y' characters
    # (e.g. "happy.py" -> "ha").
    parser.add_argument("--exp-name", type=str, default=os.path.splitext(os.path.basename(__file__))[0],
        help="the name of this experiment")

    parser.add_argument("--remove-log-dir", type=str2bool, default=False, nargs="?", const=True,
                        help="if toggled, will remove tensorboard's existed log_dir")

    parser.add_argument("--seed", type=int, default=1,
        help="seed of the experiment")
    parser.add_argument("--torch-deterministic", type=str2bool, default=True, nargs="?", const=True,
        help="if toggled, `torch.backends.cudnn.deterministic=False`")
    parser.add_argument("--cuda", type=str2bool, default=True, nargs="?", const=True,
        help="if toggled, cuda will be enabled by default")
    parser.add_argument("--track", type=str2bool, default=False, nargs="?", const=True,
        help="if toggled, this experiment will be tracked with Weights and Biases")
    parser.add_argument("--wandb-project-name", type=str, default="cleanRL",
        help="the wandb's project name")
    parser.add_argument("--wandb-entity", type=str, default=None,
        help="the entity (team) of wandb's project")

    # default__capture_video = False
    default__capture_video = True
    parser.add_argument("--capture-video", type=str2bool, default=default__capture_video, nargs="?", const=True,
        help="weather to capture videos of the agent performances (check out `videos` folder)")

    # Algorithm specific arguments
    parser.add_argument("--env-id", type=str, default="CartPole-v1",
        help="the id of the environment")
    parser.add_argument("--total-timesteps", type=int, default=500000,
        help="total timesteps of the experiments")
    parser.add_argument("--num-envs", type=int, default=4,
        help="the number of parallel game environments")
    parser.add_argument("--num-steps", type=int, default=128,
        help="the number of steps to run in each environment per policy rollout")

    # parser.add_argument("--learning-rate", type=float, default=2.5e-4, help="the learning rate of the optimizer")
    parser.add_argument("--learning-rate", type=float, default=0.001, help="the learning rate of the optimizer")
    parser.add_argument("--anneal-lr", type=str2bool, default=True, nargs="?", const=True,
        help="Toggle learning rate annealing for policy and value networks")
    parser.add_argument("--min-learning-rate", type=float, default=-1,
                        help="the min learning rate of the optimizer,"
                             "if anneal_lr toggled. "
                             "if set it to -1, it will become 1/2 * learning_rate, "
                             "if set 0, it will not work.")

    parser.add_argument("--gae", type=str2bool, default=True, nargs="?", const=True,
        help="Use GAE for advantage computation")
    parser.add_argument("--gamma", type=float, default=0.99,
        help="the discount factor gamma")
    parser.add_argument("--gae-lambda", type=float, default=0.95,
        help="the lambda for the general advantage estimation")
    parser.add_argument("--num-minibatches", type=int, default=4,
        help="the number of mini-batches")
    parser.add_argument("--update-epochs", type=int, default=4,
        help="the K epochs to update the policy")
    parser.add_argument("--norm-adv", type=str2bool, default=True, nargs="?", const=True,
        help="Toggles advantages normalization")
    parser.add_argument("--clip-coef", type=float, default=0.2,
        help="the surrogate clipping coefficient")
    parser.add_argument("--clip-vloss", type=str2bool, default=True, nargs="?", const=True,
        help="Toggles whether or not to use a clipped loss for the value function, as per the paper.")
    parser.add_argument("--ent-coef", type=float, default=0.01,
        help="coefficient of the entropy")
    parser.add_argument("--vf-coef", type=float, default=0.5,
        help="coefficient of the value function")
    parser.add_argument("--max-grad-norm", type=float, default=0.5,
        help="the maximum norm for the gradient clipping")
    parser.add_argument("--target-kl", type=float, default=None,
        help="the target KL divergence threshold")
    args = parser.parse_args(argv)
    # Derived sizes used by the rollout/optimization phases.
    args.batch_size = int(args.num_envs * args.num_steps)
    args.minibatch_size = int(args.batch_size // args.num_minibatches)
    # fmt: on
    return args


def make_env(env_id, seed, idx, capture_video, run_name):
    """Build a thunk that creates and seeds one monitored gym environment.

    Episode statistics are always recorded; video is captured only for the
    first sub-environment (idx == 0) when `capture_video` is enabled.
    """
    def thunk():
        environment = gym.make(env_id)
        environment = gym.wrappers.RecordEpisodeStatistics(environment)
        if capture_video and idx == 0:
            environment = gym.wrappers.RecordVideo(environment, f"videos/{run_name}")
        # Seed env, action space and observation space for reproducibility.
        environment.seed(seed)
        for space in (environment.action_space, environment.observation_space):
            space.seed(seed)
        return environment

    return thunk


def auto_run_tensorboard_server(log_dir, port=8123, open_browser=False):
    """Free `port`, then launch a background TensorBoard server for `log_dir`.

    Optionally opens the dashboard URL with the platform's opener command.
    """
    import subprocess
    import platform
    import os
    from shells import kill_port

    print("****** start time:", tt.get_current_beijing_time_str())

    mylog_cmd = f"tensorboard --logdir={log_dir} --port {port}"

    # Free the port first so the fresh server can bind to it.
    kill_port(port)
    tt.sleep(0.2)
    print('\n\n ====== 清理tensorboard的旧端口完毕 ======\n\n')

    mylog_url = f"http://localhost:{port}/"
    print(f'*** mylog_cmd: {mylog_cmd}')
    print(f'*** mylog_url: {mylog_url}')

    # Launch TensorBoard detached, in the background.
    subprocess.Popen(mylog_cmd, shell=True)

    if open_browser:
        # 'start' on Windows, 'open' elsewhere (macOS convention).
        start_cmd = 'start' if platform.system() == "Windows" else 'open'
        os.system(f'{start_cmd} {mylog_url}')


def layer_init(layer, std=np.sqrt(2), bias_const=0.0):
    """Orthogonally initialize `layer.weight` with gain `std` and set a constant bias.

    Returns the same layer object so it can be used inline inside nn.Sequential.
    """
    nn.init.orthogonal_(layer.weight, std)
    nn.init.constant_(layer.bias, bias_const)
    return layer


class Residual(nn.Module):  #@save
    """Basic ResNet residual block: two 3x3 convs with batch norm plus a skip path.

    With `use_1x1conv=True` the shortcut goes through a 1x1 conv so the block
    can change channel count and/or spatial stride between input and output.
    """

    def __init__(self, input_channels, num_channels,
                 use_1x1conv=False, strides=1):
        super().__init__()
        self.conv1 = nn.Conv2d(input_channels, num_channels, kernel_size=3,
                               padding=1, stride=strides)
        self.conv2 = nn.Conv2d(num_channels, num_channels, kernel_size=3,
                               padding=1)
        # Optional projection on the identity path (needed when shape changes).
        self.conv3 = (nn.Conv2d(input_channels, num_channels, kernel_size=1,
                                stride=strides)
                      if use_1x1conv else None)
        self.bn1 = nn.BatchNorm2d(num_channels)
        self.bn2 = nn.BatchNorm2d(num_channels)

    def forward(self, X):
        identity = X if self.conv3 is None else self.conv3(X)
        out = F.relu(self.bn1(self.conv1(X)))
        out = self.bn2(self.conv2(out))
        out = out + identity
        return F.relu(out)


def resnet_block(input_channels, num_channels, num_residuals, first_block=False):
    """Return a list of `num_residuals` Residual blocks forming one ResNet stage.

    Unless this is the first stage of the network, the leading residual halves
    the spatial size (stride 2) and projects `input_channels` -> `num_channels`
    via a 1x1 conv; all remaining residuals keep shape.
    """
    downsample_first = not first_block
    blocks = []
    for index in range(num_residuals):
        if index == 0 and downsample_first:
            blocks.append(
                Residual(input_channels, num_channels, use_1x1conv=True, strides=2))
        else:
            blocks.append(Residual(num_channels, num_channels))
    return blocks


class Agent(nn.Module):
    """Actor-critic network for PPO.

    Two architectures are selected by the type of
    `single_observation_space__shape` (see the caller in main()):
      * tuple -> the env's raw vector observation shape; build a small
        Tanh-MLP actor/critic pair.
      * list  -> the global `image_size` ([C, H, W]) for pixel observations;
        build a small ResNet-style CNN trunk with a multi-head-attention layer,
        shared between separate actor and critic linear heads.

    NOTE(review): the CNN path reads the module-level global `image_size` and
    imports `tools.attentions.MultiHeadAttention` — confirm both exist before
    constructing an Agent.
    """

    def __init__(self, envs, single_observation_space__shape):
        super().__init__()
        if isinstance(single_observation_space__shape, tuple):
            # --- MLP path (vector observations) ---
            critic_layer_0 = layer_init(nn.Linear(np.array(envs.single_observation_space.shape).prod(), 64))
            actor_layer_0 = layer_init(nn.Linear(np.array(envs.single_observation_space.shape).prod(), 64))
            self.critic = nn.Sequential(
                critic_layer_0,
                nn.Tanh(),
                layer_init(nn.Linear(64, 64)),
                nn.Tanh(),
                layer_init(nn.Linear(64, 1), std=1.0),
            )
            self.actor = nn.Sequential(
                actor_layer_0,
                nn.Tanh(),
                layer_init(nn.Linear(64, 64)),
                nn.Tanh(),
                layer_init(nn.Linear(64, envs.single_action_space.n), std=0.01),
            )
        else:
            # --- CNN path (pixel observations) ---
            in_channels = 3  # input channels (RGB)
            num_actions = envs.single_action_space.n

            out_channels = 32  # output channels of the last resnet stage

            num_resnet_block_layers = 1
            # Stem: 7x7 conv + max-pool, then two residual stages.
            b1 = nn.Sequential(nn.Conv2d(in_channels, 32, kernel_size=7, stride=3, padding=3),
                               nn.ReLU(), nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
            b2 = nn.Sequential(*resnet_block(32, 32, num_resnet_block_layers, first_block=True))
            b5 = nn.Sequential(*resnet_block(32, out_channels, num_resnet_block_layers))

            # from tools.attentions import CBAM
            # self.attention_layer = CBAM(channel=out_channels)
            from tools.attentions import MultiHeadAttention
            num_heads = 1
            # NOTE(review): num_hiddens=105 appears tuned to the stem's output
            # width for the current image_size — confirm if image_size changes.
            num_hiddens = 105
            self.attention_layer = MultiHeadAttention(num_hiddens, num_hiddens, num_hiddens, num_hiddens, num_heads, dropout=0.5)

            out_features = 128
            # Shared trunk: stem -> attention -> residual stages -> pooled MLP.
            self.common_layer = nn.Sequential(
                # b1, b2, b3, b4, b5,
                # b1, b2, b3, b5,
                # b1, b2, b5,
                b1, self.attention_layer, b2, b5,

                # --- ok
                nn.AdaptiveAvgPool2d((4, 8)),
                # self.attention_layer,
                nn.Flatten(),

                nn.ReLU(),
                # 1024 = out_channels(32) * 4 * 8 after the adaptive pool.
                nn.Linear(in_features=1024, out_features=out_features),
                nn.ReLU(),
            )
            # print(self.common_layer)
            # from torchvision.models import resnet18
            # model_resnet18 = resnet18(pretrained=False)
            # model_resnet18
            # for param in finetune_net.parameters():
            #     param.requires_grad = False

            # self.critic_linear = nn.Linear(out_channels, 1)
            # self.actor_linear = nn.Linear(out_channels, num_actions)

            self.critic_linear = nn.Linear(out_features, 1)
            self.actor_linear = nn.Linear(out_features, num_actions)

            self.critic = nn.Sequential(self.common_layer, self.critic_linear)
            self.actor = nn.Sequential(self.common_layer, self.actor_linear)

            if 1:
                # One-off dry run: push a dummy frame through the trunk to print
                # per-layer output shapes and count trainable parameters.
                # NOTE(review): uses the module-level global `image_size` —
                # confirm it is defined before Agent construction.
                # X = torch.rand(size=(1, 3, 224, 224))
                X = torch.rand(size=(1, 3, image_size[1], image_size[2]))
                X.shape

                # X = torch.rand(size=(1, 3, 128, 128))
                # parameter counter
                total_params = 0

                # walk each layer, accumulating its parameter count
                # for param in model.parameters():
                with torch.no_grad():
                    for layer in self.common_layer:
                        X = layer(X)
                        layer_params = 0
                        for param in layer.parameters():
                            layer_params += param.numel()
                        total_params += layer_params
                        print('layer_params:', layer_params, '---', layer.__class__.__name__, 'output shape:\t', X.shape)
                print('total_params:', total_params)
                # Stored for the latency benchmark / hyperparameter table in main().
                self.total_params = total_params

            self._initialize_weights()

            if 0:
                # Dead experiment code, kept for reference.

                # self.conv1 = nn.Conv2d(in_channels, 32, 3, stride=2, padding=1)
                # self.conv2 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
                # self.conv3 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
                # self.conv4 = nn.Conv2d(32, out_channels, 3, stride=2, padding=1)
                # # self.lstm = nn.LSTMCell(32 * 6 * 6, 512)
                # self.common_layer = nn.Sequential(
                #     self.conv1, nn.ReLU(), self.conv2, nn.ReLU(), self.conv3, nn.ReLU(), self.conv4, nn.ReLU(),
                #     nn.AdaptiveAvgPool2d((1, 1)),
                #     nn.Flatten(),
                # )

                # self.critic_linear = nn.Linear(out_channels, 1)
                # self.actor_linear = nn.Linear(out_channels, num_actions)

                # critic = nn.Sequential(self.common_layer, self.critic_linear)
                # actor = nn.Sequential(self.common_layer, self.actor_linear)
                pass

    def _initialize_weights(self):
        """Xavier-init all conv/linear weights; zero all biases (incl. LSTM cells)."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear):
                nn.init.xavier_uniform_(module.weight)
                # nn.init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.LSTMCell):
                nn.init.constant_(module.bias_ih, 0)
                nn.init.constant_(module.bias_hh, 0)

    def get_value(self, x):
        """Return the critic's state-value estimate for observation batch x."""
        # return self.critic(x)
        # NOTE(review): routes through common_layer + critic_linear, which only
        # exist on the CNN path — the MLP path would raise AttributeError here.
        x = self.common_layer(x)
        return self.critic_linear(x)

    def get_action_and_value(self, x, action=None):
        """Sample (or score) an action and return (action, log_prob, entropy, value).

        When `action` is None a new action is sampled from the categorical
        policy; otherwise the given action is scored under the current policy.
        """
        # x = F.relu(self.conv1(x))
        # x = F.relu(self.conv2(x))
        # x = F.relu(self.conv3(x))
        # x = F.relu(self.conv4(x))
        # hx, cx = self.lstm(x.view(x.size(0), -1), (hx, cx))
        # return self.actor_linear(hx), self.critic_linear(hx), hx, cx
        # x = x.to(device)

        # --- common_layer (shared trunk; CNN path only — see get_value note)
        try:
            x = self.common_layer(x)
        except Exception as e:
            print('******* Error on common_layer(x), x.shape:', x.shape)
            raise e
        logits = self.actor_linear(x)
        probs = Categorical(logits=logits)
        if action is None:
            action = probs.sample()
        return action, probs.log_prob(action), probs.entropy(), self.critic_linear(x)

        # logits = self.actor(x)
        # probs = Categorical(logits=logits)
        # if action is None:
        #     action = probs.sample()
        # return action, probs.log_prob(action), probs.entropy(), self.critic(x)


def _get_envs_render_with_no_pretreat(envs, mode='rgb_array', plot_image=False):
    """Render every sub-environment to an RGB frame and return a batched tensor.

    Frames are resized to the global `image_size` ([C, H, W]) and converted to
    float tensors in [0, 1]; environments whose render() returns None keep an
    all-zero frame. Result is moved to the global `device`.

    Fixes over the previous version: the transform pipeline is built once
    instead of once per environment, and matplotlib is imported only when a
    debug plot is actually requested.

    NOTE(review): relies on module-level globals `args`, `image_size`, `device`.
    """
    global image_size
    num_envs = args.num_envs
    res = torch.zeros([num_envs, image_size[0], image_size[1], image_size[2]])

    # Loop-invariant preprocessing pipeline, hoisted out of the env loop.
    train_transform = transforms.Compose([
        transforms.Resize(image_size[1:]),  # scale frame to the configured size
        # transforms.Grayscale(num_output_channels=1),
        transforms.ToTensor(),  # HWC uint8 -> CHW float in [0, 1]
        # transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    for i in range(num_envs):
        env_i = envs.envs[i]
        _res = env_i.render(mode=mode)
        if _res is None:
            # Keep the zero frame for envs that produced no image.
            continue
        # NumPy array -> PIL Image -> transformed CHW tensor.
        pil_image = Image.fromarray(_res)
        transformed_image: torch.FloatTensor = train_transform(pil_image)
        # Add a batch dimension and move to the target device.
        batched_image = transformed_image.unsqueeze(0).to(device)
        res[i] = batched_image

        if plot_image:
            # Only pull in matplotlib when a debug plot is requested.
            import matplotlib.pyplot as plt
            print('--- transformed_image.shape:', transformed_image.shape)
            plt.imshow(transformed_image.permute(1, 2, 0))
            plt.show()

    res = res.to(device)
    return res


def _get_envs_render_with_screen_pretreat(envs, mode='rgb_array', plot_image=False):
    """Batch the cart-centred, pre-cropped screens of all sub-environments.

    Each frame comes from `get_screen`, which crops around the cart before
    resizing. `mode` and `plot_image` are accepted for interface parity with
    the no-pretreat variant but are unused here.

    Relies on module-level globals `args`, `image_size`, `device`.
    """
    global image_size
    frames = torch.zeros([args.num_envs, image_size[0], image_size[1], image_size[2]])
    for env_index in range(args.num_envs):
        frames[env_index] = get_screen(envs.envs[env_index])
    return frames.to(device)


def get_screen(env):
    """Return a cart-centred, cropped, resized screen as a (1, C, H, W) float tensor.

    Assumes a CartPole-style environment exposing `x_threshold` and `state`
    (NOTE(review): will fail on other env types — confirm intended usage).
    Relies on the module-level globals `image_size` and `device`.
    """
    # Returned screen requested by gym is 400x600x3, but is sometimes larger
    # such as 800x1200x3. Transpose it into torch order (CHW).
    def get_cart_location(env, screen_width):
        # Map the cart's world x-coordinate to a pixel column.
        world_width = env.x_threshold * 2
        scale = screen_width / world_width
        return int(env.state[0] * scale + screen_width / 2.0)  # MIDDLE OF CART

    screen = env.render(mode='rgb_array').transpose((2, 0, 1))
    # Cart is in the lower half, so strip off the top and bottom of the screen
    _, screen_height, screen_width = screen.shape
    screen = screen[:, int(screen_height*0.4):int(screen_height * 0.8)]
    view_width = int(screen_width * 0.6)
    cart_location = get_cart_location(env, screen_width)
    if cart_location < view_width // 2:
        slice_range = slice(view_width)
    elif cart_location > (screen_width - view_width // 2):
        slice_range = slice(-view_width, None)
    else:
        slice_range = slice(cart_location - view_width // 2,
                            cart_location + view_width // 2)
    # Strip off the edges, so that we have a square image centered on a cart
    screen = screen[:, :, slice_range]
    # Convert to float, rescale, convert to torch tensor
    # (this doesn't require a copy)
    screen = np.ascontiguousarray(screen, dtype=np.float32) / 255
    screen = torch.from_numpy(screen)

    # NOTE(review): Image.BICUBIC is deprecated in Pillow >= 9.1 (use
    # Image.Resampling.BICUBIC); torchvision also prefers InterpolationMode —
    # confirm the pinned library versions support this spelling.
    resize = transforms.Compose([transforms.ToPILImage(),
                        # transforms.Resize(40, interpolation=Image.CUBIC),
                         transforms.Resize(image_size[1:], interpolation=Image.BICUBIC),  # scale the image to the configured size
                        transforms.ToTensor()])

    res = resize(screen).unsqueeze(0).to(device)
    # screen.shape
    # res.shape
    return res


def main(args):
    """Run the full PPO training loop (rollout collection + clipped-objective updates).

    Relies on several module-level globals set in the __main__ block:
    `flag__remove_dirs`, `flag__use_rgb_array`, `flag__use_random_action`,
    `get_envs_render`, `device`, `image_size`, `smoothing_rate`.
    """
    # Optionally wipe previous experiment artifacts (videos + tensorboard runs).
    if flag__remove_dirs or args.remove_log_dir:
        tt.tqdm_sleep(desc="****** Warning: flag__remove_dirs 将清除其它实验记录! 确定继续?", T=6)
        remove_dir_ls = ['videos', 'runs']
        assert isinstance(remove_dir_ls, list), 'remove_dir_ls必须为list类型!'
        for remove_dir in remove_dir_ls:
            if os.path.exists(remove_dir):
                remove_path(remove_dir, keep_external_folder=True)
        tt.sleep(1)

    # Unique run name: env__exp__seed__timestamp__random-suffix.
    run_name = f"{args.env_id}__{args.exp_name}__{args.seed}__{tt.get_current_beijing_time_str(tt.common_date_time_formats.s_int)}__{random.randint(1000, 9999)}"
    log_dir = f"runs/{run_name}"

    if args.track:
        import wandb

        wandb.init(
            project=args.wandb_project_name,
            entity=args.wandb_entity,
            sync_tensorboard=True,
            config=vars(args),
            name=run_name,
            monitor_gym=True,
            save_code=True,
        )
    writer = SummaryWriter(log_dir)

    # auto_run_tensorboard_server(log_dir, open_browser=True)
    # auto_run_tensorboard_server(log_dir, open_browser=False)

    # TRY NOT TO MODIFY: seeding
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.backends.cudnn.deterministic = args.torch_deterministic

    # default__capture_video
    envs = gym.vector.SyncVectorEnv(
        [make_env(args.env_id, args.seed + i, i, args.capture_video, run_name) for i in range(args.num_envs)]
    )
    assert isinstance(envs.single_action_space, gym.spaces.Discrete), "only discrete action space is supported"

    print(f'*** tensorboard log_dir: {log_dir} --- device: {device}')

    # image_size = [3, 224, 224]  # channels * width * height

    # flag__use_rgb_array = False
    # Vector observations use the env's own shape (tuple); pixel observations
    # use the globally configured image_size (list) — Agent branches on this type.
    if not flag__use_rgb_array:
        single_observation_space__shape = envs.single_observation_space.shape
    else:
        single_observation_space__shape = image_size

    agent = Agent(envs, single_observation_space__shape).to(device)
    optimizer = optim.Adam(agent.parameters(), lr=args.learning_rate, eps=1e-5)

    # ALGO Logic: Storage setup — rollout buffers of shape (num_steps, num_envs, ...).
    if not flag__use_rgb_array:
        obs = torch.zeros((args.num_steps, args.num_envs) + single_observation_space__shape).to(device)
    else:
        obs = torch.zeros([args.num_steps, args.num_envs] + single_observation_space__shape).to(device)
    print('=== obs.shape:', obs.shape)
    actions = torch.zeros((args.num_steps, args.num_envs) + envs.single_action_space.shape).to(device)
    print('=== actions.shape:', actions.shape)
    logprobs = torch.zeros((args.num_steps, args.num_envs)).to(device)
    rewards = torch.zeros((args.num_steps, args.num_envs)).to(device)
    dones = torch.zeros((args.num_steps, args.num_envs)).to(device)
    values = torch.zeros((args.num_steps, args.num_envs)).to(device)

    # obs.shape
    # obs.flatten(start_dim=0).shape
    # obs.flatten(start_dim=1).shape
    # obs.flatten(start_dim=2).shape
    #
    # envs.single_observation_space.shape

    # TRY NOT TO MODIFY: start the game
    global_step = 0
    start_time = time.time()
    # next_obs = torch.Tensor(envs.reset()).to(device)
    if not flag__use_rgb_array:
        next_obs = torch.Tensor(envs.reset()).to(device)
    else:
        envs.reset()
        # last_screen = torch.zeros([args.num_envs, image_size[0], image_size[1], image_size[2]]).to(device)
        # Use the difference of two consecutive frames as the observation —
        # the frame delta carries velocity information.
        last_screen = get_envs_render(envs)
        current_screen = get_envs_render(envs)
        next_obs = current_screen - last_screen

    next_done = torch.zeros(args.num_envs).to(device)
    num_updates = args.total_timesteps // args.batch_size
    # next_obs.shape

    # Quick latency benchmark: average forward-pass time over `times` calls.
    # NOTE(review): tt.__init__() appears to reset the bdtime timer — confirm.
    # NOTE(review): agent.total_params only exists on the CNN (pixel) path, so
    # this print would raise AttributeError when flag__use_rgb_array is False.
    with torch.no_grad():
        tt.__init__()
        times = 100
        for i in range(times):
            # next_obs.shape
            agent.get_action_and_value(next_obs)
        print('\n\ntt.now:', round(tt.now() / times, 3), f'--- next_obs.shape: {next_obs.shape} --- total_params:', agent.total_params)

    if args.min_learning_rate == -1:  # -1 means: floor the annealed lr at half the initial learning rate
        args.min_learning_rate = args.learning_rate / 2


    # Dump all hyperparameters as a markdown table into tensorboard.
    if hasattr(agent, 'total_params'):
        args.total_params = agent.total_params
    _text_args = "|param|value|\n|-|-|\n%s" % ("\n".join([f"|{key}|{value}|" for key, value in vars(args).items()]))
    print('=== _text_args:\n', _text_args)
    writer.add_text(
        "hyperparameters",
        _text_args,
    )
    for update in range(1, num_updates + 1):
        # break
        # Annealing the rate if instructed to do so.

        # NOTE(review): these renders run unconditionally, overwriting next_obs
        # even when flag__use_rgb_array is False — looks wrong for the
        # vector-observation path; confirm intent.
        last_screen = get_envs_render(envs)
        current_screen = get_envs_render(envs)
        next_obs = current_screen - last_screen  # frame delta carries velocity information for the agent

        if args.anneal_lr:
            # Linear decay from learning_rate to ~0, optionally floored.
            frac = 1.0 - (update - 1.0) / num_updates
            lrnow = frac * args.learning_rate
            if args.min_learning_rate > 0:
                lrnow = max(lrnow, args.min_learning_rate)
            optimizer.param_groups[0]["lr"] = lrnow

        # NOTE(review): smoothing_return is reset to None every update, so the
        # exponential smoothing never spans updates — confirm this is intended.
        smoothing_return = None
        _episodic_return_ls = []
        # ---- Rollout phase: collect num_steps transitions from each env ----
        for step in range(0, args.num_steps):
            # break
            global_step += 1 * args.num_envs
            obs[step] = next_obs
            dones[step] = next_done

            # ALGO LOGIC: action logic
            with torch.no_grad():
                action, logprob, _, value = agent.get_action_and_value(next_obs)
                if flag__use_random_action:
                    action = envs.action_space.sample()  # take random actions for a baseline comparison
                    action = torch.from_numpy(action).to(device)
                values[step] = value.flatten()
            actions[step] = action
            logprobs[step] = logprob

            # TRY NOT TO MODIFY: execute the game and log data.
            next_obs, reward, done, info = envs.step(action.cpu().numpy())
            if flag__use_rgb_array:
                # next_obs = get_envs_render(envs)
                last_screen = current_screen
                current_screen = get_envs_render(envs)
                next_obs = current_screen - last_screen  # frame delta carries velocity information for the agent

                # Zero out the frame for any env that just finished an episode.
                _indexes = np.where(done == True)[0]
                next_obs[_indexes] = torch.zeros([1, image_size[0], image_size[1], image_size[2]]).to(device)
                # next_obs[_indexes] = (torch.ones([1, image_size[0], image_size[1], image_size[2]]) * 255).to(device)

            rewards[step] = torch.tensor(reward).to(device).view(-1)
            # envs.render(mode='rgb_array')
            # rendered_images = envs.render(mode='rgb_array')
            # envs.render()
            # envs[0]
            # envs.get_env(0)
            # envs.envs[0].render(mode='rgb_array')

            next_obs, next_done = torch.Tensor(next_obs).to(device), torch.Tensor(done).to(device)

            # Log per-episode statistics injected by RecordEpisodeStatistics.
            for item in info:
                # break
                if "episode" in item.keys():
                    episodic_return = item['episode']['r']
                    print(f"global_step={global_step}, episodic_return={episodic_return}")
                    writer.add_scalar("charts/episodic_return", episodic_return, global_step)
                    writer.add_scalar("charts/episodic_length", item["episode"]["l"], global_step)
                    # _episodic_return += episodic_return / args.num_envs
                    _episodic_return_ls.append(episodic_return)
                    break

        # bootstrap value if not done
        with torch.no_grad():
            next_value = agent.get_value(next_obs).reshape(1, -1)
            if args.gae:  # Generalized Advantage Estimation (GAE)
                advantages = torch.zeros_like(rewards).to(device)
                lastgaelam = 0
                # Walk the rollout backwards, accumulating the GAE recursion.
                for t in reversed(range(args.num_steps)):
                    if t == args.num_steps - 1:
                        nextnonterminal = 1.0 - next_done
                        nextvalues = next_value
                    else:
                        nextnonterminal = 1.0 - dones[t + 1]
                        nextvalues = values[t + 1]
                    delta = rewards[t] + args.gamma * nextvalues * nextnonterminal - values[t]
                    advantages[
                        t] = lastgaelam = delta + args.gamma * args.gae_lambda * nextnonterminal * lastgaelam
                returns = advantages + values
                # returns.shape
            else:
                # Plain discounted returns; advantages are returns - values.
                returns = torch.zeros_like(rewards).to(device)
                for t in reversed(range(args.num_steps)):
                    if t == args.num_steps - 1:
                        nextnonterminal = 1.0 - next_done
                        next_return = next_value
                    else:
                        nextnonterminal = 1.0 - dones[t + 1]
                        next_return = returns[t + 1]
                    returns[t] = rewards[t] + args.gamma * nextnonterminal * next_return
                advantages = returns - values

        # flatten the batch (steps x envs -> one batch dimension)
        # obs.shape
        if not flag__use_rgb_array:
            b_obs = obs.reshape((-1,) + envs.single_observation_space.shape)
        else:
            b_obs = obs.reshape((-1,) + next_obs.shape[1:])
            # b_obs.shape
            # obs.shape
            # next_obs.shape

        b_logprobs = logprobs.reshape(-1)
        b_actions = actions.reshape((-1,) + envs.single_action_space.shape)
        b_advantages = advantages.reshape(-1)
        b_returns = returns.reshape(-1)
        b_values = values.reshape(-1)

        # ---- Optimization phase: K epochs of minibatch PPO updates ----
        b_inds = np.arange(args.batch_size)
        clipfracs = []
        for epoch in range(args.update_epochs):
            np.random.shuffle(b_inds)
            for start in range(0, args.batch_size, args.minibatch_size):
                # break
                end = start + args.minibatch_size
                mb_inds = b_inds[start:end]

                _, newlogprob, entropy, newvalue = agent.get_action_and_value(b_obs[mb_inds], b_actions.long()[mb_inds])
                logratio = newlogprob - b_logprobs[mb_inds]
                ratio = logratio.exp()

                with torch.no_grad():
                    # calculate approx_kl http://joschu.net/blog/kl-approx.html
                    old_approx_kl = (-logratio).mean()
                    approx_kl = ((ratio - 1) - logratio).mean()
                    clipfracs += [((ratio - 1.0).abs() > args.clip_coef).float().mean().item()]

                mb_advantages = b_advantages[mb_inds]
                if args.norm_adv:
                    mb_advantages = (mb_advantages - mb_advantages.mean()) / (mb_advantages.std() + 1e-8)

                # Policy loss (clipped surrogate objective)
                pg_loss1 = -mb_advantages * ratio
                pg_loss2 = -mb_advantages * torch.clamp(ratio, 1 - args.clip_coef, 1 + args.clip_coef)
                pg_loss = torch.max(pg_loss1, pg_loss2).mean()

                # Value loss (optionally clipped as in the PPO paper)
                newvalue = newvalue.view(-1)
                if args.clip_vloss:
                    v_loss_unclipped = (newvalue - b_returns[mb_inds]) ** 2
                    v_clipped = b_values[mb_inds] + torch.clamp(
                        newvalue - b_values[mb_inds],
                        -args.clip_coef,
                        args.clip_coef,
                    )
                    v_loss_clipped = (v_clipped - b_returns[mb_inds]) ** 2
                    v_loss_max = torch.max(v_loss_unclipped, v_loss_clipped)
                    v_loss = 0.5 * v_loss_max.mean()
                else:
                    v_loss = 0.5 * ((newvalue - b_returns[mb_inds]) ** 2).mean()

                entropy_loss = entropy.mean()
                loss = pg_loss - args.ent_coef * entropy_loss + v_loss * args.vf_coef

                optimizer.zero_grad()
                loss.backward()
                nn.utils.clip_grad_norm_(agent.parameters(), args.max_grad_norm)
                optimizer.step()

            # Early stop this update when the policy moved too far.
            if args.target_kl is not None:
                if approx_kl > args.target_kl:
                    break

        # Explained variance of the value function vs. empirical returns.
        y_pred, y_true = b_values.cpu().numpy(), b_returns.cpu().numpy()
        var_y = np.var(y_true)
        explained_var = np.nan if var_y == 0 else 1 - np.var(y_true - y_pred) / var_y

        # TRY NOT TO MODIFY: record rewards for plotting purposes
        writer.add_scalar("charts/learning_rate", optimizer.param_groups[0]["lr"], global_step)
        writer.add_scalar("losses/value_loss", v_loss.item(), global_step)
        writer.add_scalar("losses/policy_loss", pg_loss.item(), global_step)
        writer.add_scalar("losses/entropy", entropy_loss.item(), global_step)
        writer.add_scalar("losses/old_approx_kl", old_approx_kl.item(), global_step)
        writer.add_scalar("losses/approx_kl", approx_kl.item(), global_step)
        writer.add_scalar("losses/clipfrac", np.mean(clipfracs), global_step)
        writer.add_scalar("losses/explained_variance", explained_var, global_step)
        writer.add_scalar("charts/SPS", int(global_step / (time.time() - start_time)), global_step)

        # smoothing_return = smoothing_return * smoothing_rate + sum(_episodic_return_ls) / len(_episodic_return_ls) * (1 - smoothing_rate) if len(_episodic_return_ls) else smoothing_return
        # Exponentially smoothed mean episodic return for console output.
        if len(_episodic_return_ls):
            _mean_episodic_return = sum(_episodic_return_ls) / len(_episodic_return_ls)
            if smoothing_return is None:
                smoothing_return = _mean_episodic_return
            else:
                smoothing_return = smoothing_return * smoothing_rate + _mean_episodic_return * (1 - smoothing_rate)
        if smoothing_return is not None:
            print(run_name, "--- SPS:", int(global_step / (time.time() - start_time)), f'--- smoothing_return[{smoothing_rate}]: {round(smoothing_return, 2)}')  # , 'mean_episode_return'


if __name__ == '__main__':
    args = parse_args()

    # args.exp_name = '随机动作'
    # args.exp_name = 'resnet_32通道_3层'
    # args.exp_name = 'resnet_layer1_big_image'
    # Danger switch: non-zero wipes the 'videos' and 'runs' dirs (see main()).
    flag__remove_dirs = 0
    # EMA factor for the console-printed mean episodic return.
    smoothing_rate = 0.99

    device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu")

    # image_size = [3, 128, 256]  # channels * width * height
    image_size = [3, 40, 90]  # channels * width * height
    # image_size = [3, 40, 60]  # channels * width * height
    # image_size = [3, 40, 60]  # channels * width * height
    # image_size = [3, 400, 600]  # channels * width * height
    # image_size = [3, 128, 192]  # channels * width * height
    print('--- image_size:', image_size)

    flag__screen_pretreat = 0  # whether to crop/centre-preprocess screen captures
    flag__use_random_action = 0  # use random actions as a comparison experiment
    flag__use_rgb_array = True
    # flag__use_cnn = True

    # Select the frame-grabbing strategy used by the training loop.
    get_envs_render = _get_envs_render_with_screen_pretreat if flag__screen_pretreat else _get_envs_render_with_no_pretreat

    # Mirror the flags into args so they appear in the hyperparameter table.
    args.flag__screen_pretreat = flag__screen_pretreat
    args.flag__use_random_action = flag__use_random_action
    args.flag__use_rgb_array = flag__use_rgb_array
    args.image_size = image_size
    main(args)
    1  # no-op expression; convenient breakpoint anchor after main() returns






