import argparse
import os
import random
import time
from distutils.util import strtobool

import gym
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.distributions.categorical import Categorical
from torch.utils.tensorboard import SummaryWriter

from stable_baselines3.common.atari_wrappers import (  # isort:skip
    ClipRewardEnv,
    EpisodicLifeEnv,
    FireResetEnv,
    MaxAndSkipEnv,
    NoopResetEnv,
)
from bdtime import tt


def make_env(env_id, seed, idx, capture_video, run_name):
    """Return a thunk that builds a seeded, Atari-preprocessed environment.

    Args:
        env_id: gym environment id to create.
        seed: RNG seed applied to the env and its action/observation spaces.
        idx: index of this env in a vectorized stack; only env 0 records video.
        capture_video: when True, record videos of env 0 under ``videos/<run_name>``.
        run_name: run identifier used for the video output directory.

    Returns:
        A zero-argument callable creating the wrapped env (the thunk form is
        what gym's vector-env constructors expect).
    """

    def thunk():
        env = gym.make(env_id)
        env = gym.wrappers.RecordEpisodeStatistics(env)
        if capture_video and idx == 0:
            env = gym.wrappers.RecordVideo(env, f"videos/{run_name}")
        # Standard Atari preprocessing stack (wrapper order matters):
        env = NoopResetEnv(env, noop_max=30)
        env = MaxAndSkipEnv(env, skip=4)
        env = EpisodicLifeEnv(env)
        if "FIRE" in env.unwrapped.get_action_meanings():
            env = FireResetEnv(env)
        env = ClipRewardEnv(env)
        env = gym.wrappers.ResizeObservation(env, (84, 84))
        env = gym.wrappers.GrayScaleObservation(env)
        env = gym.wrappers.FrameStack(env, 4)
        # gym>=0.26 (which the rest of this file requires: render_mode,
        # 5-tuple step) removed Env.seed(); seed the env RNG through
        # reset(seed=...) instead of the old env.seed(seed) call.
        env.reset(seed=seed)
        env.action_space.seed(seed)
        env.observation_space.seed(seed)
        return env

    return thunk

# --- Script-level configuration ---
env_id = 'CartPole-v0'
save_trajectories = True  # whether collected trajectories should be dumped to `save_path`
save_path = f'./trajectories__{env_id}.json'

recent_ls_length = 3  # stop training early once `reward_threshold` is reached this many times
max_step_in_per_epoch = 200  # maximum number of steps per epoch (episode cap)


class MyWrapper(gym.Wrapper):
    """CartPole wrapper exposing an old-style reset/step API.

    - ``reset()`` returns only the observation (drops the gym>=0.26 info dict).
    - ``step()`` returns ``(state, reward, over, truncated, info)`` where
      ``over`` is ``terminated or truncated`` — the single "episode finished"
      flag that the rollout loop below consumes.
    - Episodes are capped at ``max_step_in_per_epoch`` steps; an episode that
      ends before the cap has its final reward zeroed out as a penalty.
    """

    def __init__(self, show=False):
        # 'human' opens a live window; 'rgb_array' keeps frames for show().
        render_mode = 'human' if show else 'rgb_array'
        env = gym.make(env_id, render_mode=render_mode)
        super().__init__(env)  # gym.Wrapper stores the env as self.env
        self.step_n = 0  # steps taken in the current episode

    def reset(self):
        """Reset the wrapped env and return only the initial observation."""
        state, _ = self.env.reset()
        self.step_n = 0
        return state

    def step(self, action):
        """Step the env; returns (state, reward, over, truncated, info)."""
        state, reward, terminated, truncated, info = self.env.step(action)
        over = terminated or truncated

        # Enforce the episode-length cap (module constant instead of a
        # duplicated magic 200, so the cap lives in one place).
        self.step_n += 1
        if self.step_n >= max_step_in_per_epoch:
            truncated = True
            over = True

        # Episode ended before the cap: zero the final reward as a penalty.
        if over and self.step_n < max_step_in_per_epoch:
            reward = 0

        return state, reward, over, truncated, info

    def show(self):
        """Render the current frame with matplotlib."""
        from matplotlib import pyplot as plt
        plt.figure(figsize=(3, 3))
        plt.imshow(self.env.render())
        plt.show()

# env = MyWrapper()  # headless alternative (rgb_array rendering)
env = MyWrapper(show=True)  # show=True opens a human render window
reward_threshold = env.spec.reward_threshold  # env's "solved" score from the gym spec

is_discrete_action = isinstance(env.action_space, gym.spaces.Discrete)
# Discrete spaces: number of actions; continuous: size of the action vector.
act_dim = env.action_space.n if is_discrete_action else env.action_space.shape[0]
state_dim = env.observation_space.shape[0]

env.reset()

max_epoch = 10  # number of random-policy episodes to roll out

for epoch in range(max_epoch):
    # Start each epoch from a fresh episode; without this the loop kept
    # stepping an already-finished env once the first episode ended.
    state = env.reset()
    for step in range(max_step_in_per_epoch):
        tt.sleep(0.1)  # slow down so the human render window is watchable
        # Uniform random policy over the discrete action space.
        action = random.randint(0, act_dim - 1)

        state, reward, over, truncated, info = env.step(action)

        # Stop stepping once MyWrapper reports the episode is over.
        if over:
            break
