import random
import gym
import gym.spaces
import numpy as np
import traceback
import pprint

class GridEnv1(gym.Env):
    """
    A 1-D corridor of 10 cells labelled 0..9.

    The agent starts at cell 0 and moves one cell per step:
    action 0 = left, action 1 = right.  Every step costs -1 reward.
    Reaching cell 9 ends the episode with +100; walking off the left
    edge (below cell 0) or exceeding 100 steps ends it with -100.
    """

    def __init__(self, env_config):
        """
        :param env_config: accepted for RLlib compatibility; unused here.
        """
        self.action_space = gym.spaces.Discrete(2)
        # Explicit dtype keeps the Box well defined and silences gym's
        # "Box bound precision" warning.
        self.observation_space = gym.spaces.Box(
            np.array([0]), np.array([9]), dtype=np.float32)
        self.reset()

    def reset(self):
        """
        Start a new episode at cell 0.

        :return: the initial observation ``[0]`` (the same list object
                 that subsequent ``step`` calls return, so state stays
                 consistent for callers that hold onto it).
        """
        self.observation = [0]
        self.done = False
        self.step_num = 0
        return self.observation

    def step(self, action) -> tuple:
        """
        Advance the environment by one step.

        :param action: 0 to move left, 1 to move right.
        :return: tuple ``(observation, reward, done, info)``.
        """
        # Map action 0 -> -1 so the position delta is -1 (left) or +1 (right).
        delta = -1 if action == 0 else 1
        self.observation[0] += delta
        self.step_num += 1
        reward = -1.0
        if self.step_num > 100 or self.observation[0] < 0:
            # Death: ran out of steps or walked off the left edge.
            # Clamp so the terminal observation stays inside
            # observation_space (the original could return [-1]).
            self.observation[0] = max(self.observation[0], 0)
            reward = -100.0
            self.done = True
        elif self.observation[0] == 9:
            # Reached the goal cell.
            reward = 100.0
            self.done = True
        return self.observation, reward, self.done, {}

    def render(self, mode='human'):
        """No-op rendering; the corridor has no visualisation."""
        pass



# import ray
# from ray.rllib.agents.dqn import DQNTrainer

# ### grid_od = GridEnv1({})
# ray.init()
# trainer = DQNTrainer( env=GridEnv1, config={'framework': 'tfe', } )

# for i in range(3):
    # result = trainer.train()
























