import numpy as np

from environ.env_5bits_2048 import Env_2048
from tuple_17_4.network import Tuple_17_4


class Q_Learning():
    """Tabular TD(0) Q-learning agent for 2048.

    Keeps one n-tuple value network (``Tuple_17_4``) per action, indexed
    0=up, 1=down, 2=left, 3=right.
    """

    def __init__(self, lr: float, gamma: float = 1.0) -> None:
        """
        Args:
            lr: learning rate applied to the TD error on each update.
            gamma: discount factor for the bootstrapped next-state value.
                Defaults to 1.0, which reproduces the original undiscounted
                update ``reward + next_q - q``.
        """
        self.env = Env_2048()
        self.lr = lr
        self.gamma = gamma

        # One value table per action (0=up, 1=down, 2=left, 3=right).
        self.q_table = [Tuple_17_4() for _ in range(4)]

    def evaluate(self, board: np.ndarray, available) -> tuple:
        """Return ``(action, q)`` for the best currently-available move.

        Args:
            board: board encoding accepted by ``Tuple_17_4.get_value``.
            available: boolean mask of length 4 marking legal moves;
                at least one entry must be truthy.

        Returns:
            ``(action, q)`` — the index (0..3) of the available move with
            the highest table value, and that value.
        """
        q_values = [table.get_value(board) for table in self.q_table]

        # Row 0 carries the action indices, row 1 the q-values, so masking
        # columns with `available` keeps index and value paired.
        all_q = np.array([[0, 1, 2, 3], q_values])
        available_all_q = all_q[:, available]

        _index = np.argmax(available_all_q[1])
        # Row 0 became float through the mixed array; cast back to int.
        return int(available_all_q[0, _index]), available_all_q[1, _index]

    def learn(self, action, q, reward, next_board: np.ndarray):
        """Apply one TD(0) update for (action, q) given the observed step.

        Args:
            action: index (0..3) of the action whose table is updated.
            q: the value estimate used when `action` was chosen.
            reward: immediate reward observed for the transition.
            next_board: board after the move and tile insertion.

        Returns:
            The availability mask of ``next_board`` (all-false at terminal),
            so the caller can reuse it for the next decision.
        """
        available = self.env.available_move(next_board)

        if np.any(available):
            _, next_q = self.evaluate(next_board, available)
        else:
            next_q = 0.0  # terminal state: no future value to bootstrap

        # TD error: r + gamma * max_a' Q(s', a') - Q(s, a)
        delta = reward + self.gamma * next_q - q
        self.q_table[action].update_value(delta * self.lr)
        return available


if __name__ == '__main__':
    # Train forever-ish: play games greedily w.r.t. the current tables and
    # apply one TD update per move.
    agent = Q_Learning(0.001)

    total_games = int(5e6)
    for game in range(1, total_games + 1):
        moves_played = 0
        penalty = 0
        board, extra = agent.env.init_board()
        penalty += extra
        legal = agent.env.available_move(board)

        while True:
            print(".....................")
            print("move_num: ", moves_played)
            print(board)

            # Pick the best legal move; the freshly initialized board is
            # never a terminal position.
            action, q_value = agent.evaluate(board, legal)
            moved = agent.env.execute_move(action, board)
            print(".....................")
            print(legal)
            print("choose action: ", action)
            print(moved)
            moves_played += 1

            # Step reward = score gained by the move itself.
            step_reward = agent.env.get_score(moved, 0) - agent.env.get_score(board, 0)
            print("reward : ", step_reward)
            next_board, extra = agent.env.insert_tile(moved)
            penalty += extra

            legal = agent.learn(action, q_value, step_reward, next_board)
            board = next_board

            if not np.any(legal):
                # Terminal position reached: report the finished game.
                print(".....................")
                print("games   : ", game)
                print("move num: ", moves_played)
                print("max_rank: ", np.max(board))
                print("AllScore: ", agent.env.get_score(board, penalty))
                break


