import numpy as np

from env.py2048 import board
from n_tuple.network import Net, tables


# Rolling per-game statistics, flushed every `unit` games.
score_list = []
board_list = []


def make_statistic(n, score, board, unit=1000):
    """Record one finished game; every `unit` games print a summary and reset.

    n:     1-based game counter.
    score: final score of the game just played.
    board: final board; only `max(board.tile)` (largest tile exponent) is kept.
    unit:  reporting period, also the denominator for the percentages below.
    """
    score_list.append(score)
    board_list.append(max(board.tile))
    if n % unit:
        return

    print(n, "\t", "mean =", int(np.mean(score_list)), "\t", "max =", int(np.max(score_list)))
    # Summary of the learned n-tuple weights (module-level `tables`).
    print("parameter \t", "mean =", np.mean([np.mean(t) for t in tables]),
                          "\t", "min =",  np.min([np.min(t) for t in tables]),
                          "\t", "max =",  np.max([np.max(t) for t in tables]))

    # counts[i] = number of games whose best tile had exponent i.
    counts = [board_list.count(i) for i in range(16)]
    for i, c in enumerate(counts):
        if c:
            # (1 << i) & -2 clears bit 0: exponent 0 prints as tile value 0
            # (presumably "no tile"), every other exponent i prints as 2**i.
            print("\t", "{:8d}".format((1 << i) & -2), " \t ", end="")
            # Cumulative reach-rate of this tile or better, then exact-rate.
            print("{:6.1f}% \t ({:4.1f}%)".format(sum(counts[i:]) / unit * 100, c / unit * 100))
    score_list.clear()
    board_list.clear()



def learn(path, lr = 0.1):
    """Backward TD(0) update over one game's trajectory of afterstates.

    path: list of (after, est_state, reward, est_value, r_est_value) tuples
          in chronological order; this call consumes (empties) the list.
    lr:   learning rate applied to each TD error.
    """
    # Robustness: a game that ended before any move yields an empty path;
    # the original code would raise IndexError on pop().
    if not path:
        return

    # Terminal afterstate: its true remaining value is 0, so push the
    # estimate toward 0 and keep the freshly updated value.
    _, est_state, reward, est_value, _ = path.pop()
    error = 0 - est_value
    est_value = est_state.update(lr * error)

    # Walk backward: each earlier afterstate is pushed toward the TD target
    # reward + V(successor afterstate), using the successor's value as
    # updated on the previous iteration.
    while path:
        _, next_est_state, next_reward, next_est_value, _ = path.pop()
        error = reward + est_value - next_est_value
        est_value = next_est_state.update(lr * error)
        reward = next_reward


games = 100000


# Self-play training: pick the move maximizing reward + estimated afterstate
# value, record the trajectory, and run a backward TD update when the game ends.
for game in range(games):
    score = 0
    step = 0
    path = []
    b_board = board().popup().popup()

    while True:
        # Evaluate all four moves; reward == -1 marks an illegal move.
        candidates = []
        for after, reward in (b_board.up(), b_board.right(), b_board.down(), b_board.left()):
            if reward != -1:
                est_state = Net(after.tile)
                est_value = est_state.estimate()
                candidates.append((after, est_state, reward, est_value, reward + est_value))

        if not candidates:
            # No legal move left: game over — learn from the trajectory.
            learn(path)
            make_statistic(game + 1, score, b_board)
            break

        # Greedy choice on reward + estimated afterstate value.
        best = max(candidates, key=lambda c: c[-1])
        path.append(best)
        b_board = best[0].popup()
        score += best[2]
        step += 1

