import pandas as pd
import numpy as np
import time

def showS(state,numState,episode,step):
    """Render the 1-D world on one line: '-' cells, 'T' treasure, agent 'o' ('*' on arrival)."""
    pos = states.index(state)  # agent's cell index in the global state list
    cells = ['-'] * (numState - 1) + ['T']
    cells[pos] = '*' if pos == numState - 1 else 'o'
    header = 'Episode: %s, step: %s     ' % (episode, step)
    # Overwrite the previous frame in place with carriage returns.
    print('\r{}'.format('                                                   '), end='')
    print('\r{}'.format(header + ''.join(cells)), end='')
    if pos == numState - 1:
        # Reached the treasure: show the episode summary and pause briefly.
        summary = 'Total steps: %s     ' % (step)
        print('\r{}'.format('                                               '), end='')
        print('\r{}'.format(summary), end='')
        time.sleep(1)

def env_resp(state,action):
    """Take one step from `state`; return (next_state, reward).

    Moving off the left edge is a no-op; reward is 1 only when the next
    state is the terminal (treasure) cell, otherwise 0.
    """
    i = states.index(state)
    if action == 'left':
        if i > 0:       # left wall: stay put at index 0
            i -= 1
    elif i != numState - 1:
        i += 1          # any non-'left' action moves right until the end
    reward = 1 if i == numState - 1 else 0
    return states[i], reward

def chooseAction(state):
    """Epsilon-greedy action selection from the global value table.

    Explore (uniform random over actionG) with probability epsilon, when the
    state's row is entirely untrained (all zeros), or when the two actions
    tie; otherwise exploit the action with the highest estimated value.
    """
    policy=vTable.loc[state]
    # BUG FIX: the original condition `policy.all()==0` compared the boolean
    # result of .all() to 0, so it was True whenever ANY entry was zero —
    # forcing random exploration even for partially trained rows.  The
    # intended check is "every entry is zero": (policy == 0).all().
    if np.random.rand()<=epsilon or (policy == 0).all() or policy.left==policy.right :
        action=np.random.choice(actionG)
    else:
        action=policy.idxmax()
    return action

def dl(state,newState,action,reward): # Q-learning backup: update the value table from one observed transition
    """Apply one Q-learning update to vTable for (state, action).

    state    -- state before the action was taken
    newState -- state observed after taking the action
    action   -- 'left' or 'right' (a column of vTable)
    reward   -- immediate reward returned by the environment
    """
    q_predict=vTable.loc[state,action] # current estimate for (state, action): the best guess of future value before this observation
    if states.index(newState) != numState-1:
        q_real=reward+dis_r*vTable.loc[newState].max() # observed target: immediate reward plus discounted best value of the next state
    else:
        q_real=reward # terminal state ends the episode, so its future value is taken as 0
    vTable.loc[state,action]=q_predict+lr*(q_real-q_predict) # nudge the prediction toward the observed target at learning rate lr
    # The whole step is: predict, act, observe, then correct the prediction with the observation.

numState=7  # number of cells in the 1-D world; the last cell holds the treasure
sIndex=list(range(numState))
states=['s'+str(i) for i in sIndex]  # state labels 's0' .. 's6'
# Q-table: estimated value of each action in each state, initialized to zero.
vTable=pd.DataFrame({'left': np.zeros(numState),
                     'right': np.zeros(numState)},
                  index=states)
actionG=['left','right']  # the two available actions
epsilon=0.1 # probability of picking a random action while learning (exploration rate)
lr=0.1 # learning rate
dis_r=0.9 # discount rate applied to future value

# Initialize the agent's external environment and run the training episodes.
#state=states[np.random.randint(numState-1)] # alternative: random (non-terminal) start
episodes=20 # number of training episodes
for episode in range(episodes):
    step=1
    isOver=False
    # BUG FIX: the starting state was previously assigned once before this
    # loop, so every episode after the first began AT the terminal state and
    # degenerated to a single step (the commented-out reset lines show a
    # per-episode reset was intended).  Reset the start of every episode.
    state=states[2]
    #showS(state,numState,episode,step)
    time.sleep(.3)
    while not isOver:
        action=chooseAction(state)
        state_,reward=env_resp(state,action)
        dl(state,state_,action,reward)   # learn from the observed transition
        state=state_
        if states.index(state) == numState-1:
            isOver=True                  # reached the treasure: episode ends
        step+=1
        showS(state,numState,episode,step)
        time.sleep(.3)
# A bare `vTable` expression only displays in a REPL/notebook; print it so
# the learned value table is shown when run as a script.
print(vTable)
