import random
import numpy as np
import traceback
import pprint

import gym
import gym.spaces
from typing import Generic, Optional, SupportsFloat, Tuple, TypeVar, Union
from gym.utils import seeding
from bdtime import tt


# Generic type variables used to annotate the observation/action types in the
# gym-style reset()/step() signatures below (mirrors gym.core's own aliases).
ObsType = TypeVar("ObsType")
ActType = TypeVar("ActType")


class GridEnv1(gym.Env):
    """A 1-D corridor of 10 cells labelled 0..9.

    The agent starts at cell 0 and moves left (action 0) or right
    (action 1) one cell per step.  Reaching cell 9 ends the episode
    with +100 reward; stepping below cell 0 or exceeding 100 steps
    ends it with -100.  Every other step yields -1.
    """

    def __init__(self, env_config=None):
        # env_config is accepted for RLlib-style construction but unused.
        self.action_space = gym.spaces.Discrete(2)
        # dtype made explicit to match the float32 arrays that
        # reset()/step() return.
        self.observation_space: gym.spaces.Box = gym.spaces.Box(
            np.array([-1]), np.array([9]), dtype=np.float32
        )
        self.reset()

    def reset(
            self,
            *,
            seed: Optional[int] = None,
            return_info: bool = False,
            options: Optional[dict] = None,
    ) -> Union[ObsType, Tuple[ObsType, dict]]:
        """Reset the episode to the start cell (0).

        :param seed: optional RNG seed, stored via gym's seeding helper
        :param return_info: when True, also return an (empty) info dict,
            per the gym>=0.21 reset API
        :param options: unused; accepted for API compatibility
        :return: initial observation, or (observation, info) when
            return_info is True
        """
        if seed is not None:
            self._np_random, seed = seeding.np_random(seed)

        self.observation = [0]
        self.done = False
        self.step_num = 0
        obs = np.array(self.observation, dtype=np.float32)
        # BUG FIX: return_info was previously ignored and a bare
        # observation was always returned; honour the declared API.
        if return_info:
            return obs, {}
        return obs

    def step(self, action) -> tuple:
        """Advance one step.

        :param action: 0 = move left, 1 = move right
        :return: (observation, reward, done, info) tuple
        """
        # Map the discrete action {0, 1} onto a displacement {-1, +1}.
        if action == 0:
            action = -1

        self.observation[0] += action
        self.step_num += 1
        reward = -1.0

        # Death: walked off the left edge, or exhausted the step budget.
        if self.step_num > 100 or self.observation[0] < 0:
            reward = -100.0
            self.done = True
            return np.array(self.observation, dtype=np.float32), reward, self.done, {}

        # Win: reached the right-most cell.
        if self.observation[0] == 9:
            reward = 100.0
            self.done = True

        # NOTE(review): removed the leftover `tt.sleep(0.1)` debug delay —
        # it added 0.1 s of wall-clock time to every step, making even a
        # single optimal episode take ~1 second.
        return np.array(self.observation, dtype=np.float32), reward, self.done, {}

    def render(self, mode='human'):
        # Nothing to draw; the state is a single integer position.
        pass


