"""
This part of code is the Q learning brain, which is a brain of the agent.
All decisions are made in here.

View more on my tutorial page: https://morvanzhou.github.io/tutorials/
"""


import numpy as np


class QLearningTable:
    """Tabular Q-learning agent over a 10x10 grid of 2-D states.

    The Q-table is a numpy array of shape (10, 10, n_actions), indexed by
    the two state coordinates and the action index.
    """

    def __init__(self, actions, learning_rate=0.1, reward_decay=0.9, e_greedy=0.9):
        self.actions = actions  # list of action indices
        # Bug fix: these assignments were commented out in the original, so the
        # constructor arguments were silently ignored and calling learn()
        # before choose_action() raised AttributeError.
        self.lr = learning_rate       # alpha: step size of the TD update
        self.gamma = reward_decay     # discount factor for future rewards
        self.epsilon = e_greedy       # probability of acting greedily
        # One row of action values per (row, col) grid cell.
        self.q_table1 = np.zeros((10, 10, len(self.actions)))

    def choose_action(self, s, rand):
        """Pick an action for 2-D state ``s`` via epsilon-greedy selection.

        ``rand`` toggles between two hyper-parameter profiles:
        exploratory (rand=True: lr=0.5, gamma=0.98, epsilon=0.8) and
        exploitative (rand=False: lr=0.1, gamma=0.9, epsilon=0.98).
        NOTE(review): this overwrites the constructor hyper-parameters on
        every call — preserved for backward compatibility with callers
        that rely on the profile switch.
        """
        if rand:
            self.lr, self.gamma, self.epsilon = 0.5, 0.98, 0.8
        else:
            self.lr, self.gamma, self.epsilon = 0.1, 0.9, 0.98

        if np.random.uniform() < self.epsilon:
            # Greedy branch: break ties among maximal Q-values uniformly
            # at random so equal-valued actions are sampled fairly.
            state_action1 = self.q_table1[s[0], s[1]]
            action1 = np.random.choice(
                np.where(state_action1 == state_action1.max())[0]
            )
        else:
            # Exploration branch: uniform random action.
            action1 = np.random.choice(self.actions)
        return action1

    def learn(self, s, a, d, r, s_):
        """Apply one TD(0) update for the transition (s, a) -> (r, s_).

        ``d`` marks the end of an episode: the target is then the bare
        reward, with no bootstrapping from the next state.
        """
        q_predict1 = self.q_table1[s[0], s[1], int(a)]

        if d:  # terminal transition: no future value
            q_target1 = r
        else:  # bootstrap from the best action value in the next state
            q_target1 = r + self.gamma * self.q_table1[s_[0], s_[1]].max()
        self.q_table1[s[0], s[1], int(a)] += self.lr * (q_target1 - q_predict1)  # update
