import numpy as np

# Q-learning agent that learns an optimal policy through a Q-table.
class QlerningAgent:
    """Tabular Q-learning agent.

    Observations are discretized into an 8-dimensional index and the
    Q-values live in a dense numpy table of shape obs_dim + [action_dim].

    NOTE(review): the class name keeps the original spelling ("Qlerning")
    for backward compatibility with existing callers.
    """

    # Class-level defaults; __init__ replaces them with per-instance values.
    Qtable = np.nan          # placeholder until __init__ builds the real table
    scanStp = 8              # stride used when subsampling the raw observation
    obs_dim = [10, 6]        # bin counts for the first two state components
    action_dim = 25          # number of discrete actions

    def __init__(self, path=None):
        """Build a zeroed Q-table, or load an existing one from *path*.

        Args:
            path: optional .npy file previously written by SaveTable().
        """
        # 8 state dimensions: 2 base components + 6 subsampled scan bins.
        self.obs_dim = [10, 6]
        self.obs_dim += [10] * 6
        if path is not None:
            # Load directly instead of first allocating a throwaway
            # zero table (the table is large).
            self.Qtable = np.load(path)
        else:
            # float16 keeps the table's memory footprint down.
            self.Qtable = np.zeros(
                self.obs_dim + [self.action_dim], dtype=np.float16
            )

    def choose_action(self, state):
        """Return the greedy action for *state*.

        Uses argmax over the Q-row, which is also correct when every
        Q-value is negative — the old manual scan started its running
        maximum at 0 and would always return action 0 in that case.
        """
        s = self.DiscrtObs(state)
        return int(np.argmax(self.getSubTableByR(s)))

    def getSubTableByR(self, s):
        """Return the Q-value row (a length-action_dim view) for index *s*."""
        return self.Qtable[tuple(s)]

    def learn(self, state, action, reward, next_state):
        """One tabular Q-learning update:

        Q(s,a) <- (1-lr)*Q(s,a) + lr*(reward + gamma * max_a' Q(s',a'))
        """
        lr = 0.1      # learning rate
        gamma = 0.9   # discount factor
        s = self.DiscrtObs(state)
        ns = self.DiscrtObs(next_state)
        t = self.getSubTableByR(s)     # view into Qtable: writes stick
        nt = self.getSubTableByR(ns)
        t[action] = t[action] * (1 - lr) + lr * (reward + gamma * np.max(nt))

    def DiscrtObs(self, obs):
        """Reduce and discretize a raw observation into the 8-d table index.

        - obs[0]: squashed with tanh(x/50), binned into obs_dim[0] bins.
        - obs[1]: squashed with tanh(x), binned into obs_dim[1] bins.
        - obs[2:]: every scanStp-th value, presumably a reading in
          [0, 100] — TODO confirm against the caller — binned with a
          sqrt curve into obs_dim[2] bins.  Negative readings are
          clamped to 0 first: the old code computed (-x) ** 0.5, which
          is complex in Python and made int() raise.
        """
        r = []
        r.append(int(((np.tanh(obs[0] / 50) + 1) / 2) * self.obs_dim[0]))
        r.append(int((np.tanh(obs[1]) + 1) / 2 * self.obs_dim[1]))
        for i in range(2, len(obs), self.scanStp):
            v = max(obs[i], 0.0)  # clamp: sqrt of a negative is complex
            r.append(int((v / 100) ** 0.5 * self.obs_dim[2]))
        # Clip every index into its table range.
        r[0] = np.clip(r[0], 0, self.obs_dim[0] - 1)
        r[1] = np.clip(r[1], 0, self.obs_dim[1] - 1)
        for i in range(2, len(r)):
            r[i] = np.clip(r[i], 0, self.obs_dim[2] - 1)
        return np.array(r)

    def SaveTable(self, path):
        """Persist the Q-table to *path* as a .npy file."""
        np.save(path, self.Qtable)

    def LoadTable(self, path):
        """Replace the current Q-table with the one stored at *path*."""
        self.Qtable = np.load(path)

if __name__ == "__main__":
    import os

    # Create a fresh agent and persist its zero-initialized Q-table.
    path = "models/Qtable.npy"
    # np.save does not create missing directories; ensure "models/" exists
    # so the save cannot fail with FileNotFoundError.
    os.makedirs(os.path.dirname(path), exist_ok=True)
    agent = QlerningAgent()
    agent.SaveTable(path)