#!/usr/bin/env python
# coding: utf-8

# In[1]:


import numpy as np
import pandas as pd
import time


# In[2]:


# Seed NumPy's legacy global RNG so every run draws the same "random" numbers.
np.random.seed(2)


# In[57]:


# Hyperparameters of the 1-D treasure-hunt Q-learning demo.
# (Names are kept as-is — including the historical misspellings — because
# the functions below reference them as module globals.)
N_STATES = 6            # length of the 1-D world, terminal 'T' at the right end
Actions = ['left', 'right']   # the two available moves
eposilon = 0.9          # greedy factor: exploit with probability 0.9
alpha = 0.1             # learning rate
gammar = 0.9            # discount factor for future reward
max_eposilon = 13       # number of training episodes
fresh_time = 0.3        # seconds between rendered frames


# In[58]:


def build_q_table(n_states, actions):
    """Create and display an all-zero Q-table.

    One row per state, one column per action; every Q-value starts at 0.
    """
    zeros = np.zeros((n_states, len(actions)))
    table = pd.DataFrame(zeros, columns=actions)
    print(table)  # show the freshly initialised table
    return table


# In[59]:


# Preview the initial (all-zero) Q-table.
build_q_table(N_STATES, Actions)


# In[60]:


def choose_action(state, q_table):
    """Pick an action for *state* with an epsilon-greedy policy.

    Reads module globals ``eposilon`` (greedy factor) and ``Actions``.
    Returns the action NAME ('left' or 'right'), never a column position.
    """
    state_actions = q_table.iloc[state, :]  # Q-values of the current state's row
    # Explore with probability 1 - eposilon, or when this state is still
    # untrained (every Q-value is zero).
    # BUGFIX: the original test `state_actions.all() == 0` was True whenever
    # ANY value was zero (.all() is False if any element is 0, and
    # False == 0 is True), so the agent kept acting randomly in
    # partially-trained states.
    if np.random.uniform() > eposilon or (state_actions == 0).all():
        action_name = np.random.choice(Actions)
    else:
        # Exploit: idxmax() returns the column LABEL of the best action.
        # BUGFIX: Series.argmax() returns a positional int in modern pandas,
        # which would break q_table.loc[S, A] and the string comparison
        # in FeedBack.
        action_name = state_actions.idxmax()
    return action_name


# In[65]:


def FeedBack(State, Action):
    """Environment step: return ``(next_state, reward)`` for *Action* in *State*.

    Stepping right from the cell next to the treasure (index N_STATES - 2)
    ends the episode with reward 1; every other transition yields reward 0.
    Moving left at the left wall (state 0) leaves the agent in place.
    """
    if Action == "right":
        if State == N_STATES - 2:
            # One cell left of the treasure: moving right finishes the episode.
            return "terminal", 1
        return State + 1, 0
    # Moving left never earns a reward and cannot go past the left wall.
    next_state = State if State == 0 else State - 1
    return next_state, 0
    


# In[ ]:





# In[66]:


def update_env(S, eposilon, step_counter):
    """Render the 1-D world ('----T') with the agent drawn as 'o'.

    When ``S`` is the string 'terminal', print an episode summary instead,
    pause two seconds, then wipe the line. Otherwise redraw the track and
    pause ``fresh_time`` seconds so the animation is visible.
    """
    env_list = ['-'] * (N_STATES - 1) + ['T']  # track with treasure at the end
    if S == 'terminal':
        # Episode finished: report how many steps it took.
        interaction = 'Eposilon %s: total step = %s' % (eposilon + 1, step_counter)
        print("\r{}".format(interaction), end=' ')
        time.sleep(2)
        print('\r              ', end='')
    else:
        env_list[S] = 'o'  # agent marker
        interaction = ' '.join(env_list)
        print("\r{}".format(interaction), end=' ')
        time.sleep(fresh_time)


# In[67]:


def rl():
    """Train the agent with tabular Q-learning and return the learned Q-table.

    Runs ``max_eposilon`` episodes; each episode starts at the left end
    (state 0) and loops until the terminal cell is reached, updating
    Q(S, A) <- Q(S, A) + alpha * (target - Q(S, A)) after every step and
    rendering the world via ``update_env``.
    """
    q_table = build_q_table(N_STATES, Actions)  # all-zero table to start
    for episode in range(max_eposilon):
        step_counter = 0
        S = 0                 # every episode begins at the left wall
        terminated = False
        update_env(S, episode, step_counter)
        while not terminated:
            A = choose_action(S, q_table)      # epsilon-greedy action
            S_, R = FeedBack(S, A)             # environment transition
            q_predict = q_table.loc[S, A]      # current estimate of Q(S, A)
            if S_ == 'terminal':
                q_target = R                   # no future reward past the goal
                terminated = True
            else:
                # Bootstrapped target: reward plus discounted best next value.
                q_target = R + gammar * q_table.iloc[S_, :].max()
            q_table.loc[S, A] += alpha * (q_target - q_predict)
            S = S_
            step_counter += 1
            update_env(S, episode, step_counter)
    return q_table


# In[ ]:


if __name__ == "__main__":
    # Train from the command line, then dump the learned Q-table.
    learned_table = rl()
    print('\r\nQ-table:\n')
    print(learned_table)


# In[ ]:





# In[ ]:




