import numpy as np
from rknnlite.api import RKNNLite
from decision_util import check_winner


def load_rknn_model(model_path):
    """Load an RKNN model from *model_path* and bind it to NPU core 0.

    Returns the initialised RKNNLite runtime.
    Raises RuntimeError if either loading or runtime init fails.
    """
    runtime = RKNNLite(verbose=False)
    if runtime.load_rknn(model_path) != 0:
        raise RuntimeError("Load RKNN model failed")
    if runtime.init_runtime(core_mask=RKNNLite.NPU_CORE_0) != 0:
        raise RuntimeError("Init runtime environment failed")
    return runtime


# 定义 MCTS 节点类
class MCTSNode:
    """A node in the Monte-Carlo search tree.

    The board is a flat list of cells ("", "X" or "O") whose LAST element
    encodes the player to move at this node (see ``expand``).
    """

    def __init__(self, board, prob=1.0, parent=None):
        # Board state this node represents.
        self.board = board
        # Parent node (None for the root).
        self.parent = parent
        # Mapping: action index -> child MCTSNode.
        self.children = {}
        # Visit count N(s).
        self.visits = 0
        # Running mean of backed-up state values.
        self.Q = 0
        # Prior probability P(s, a) predicted by the network.
        self.P = prob
        # Indices of empty cells not yet expanded.
        self.untried_actions = [i for i, c in enumerate(board) if c == ""]

    def is_fully_expanded(self):
        # True once there is no empty cell left to try.
        return not self.untried_actions

    def select_child(self, c_puct=1.4):
        """Return the child maximising the PUCT score Q + U (None if no children)."""
        if not self.children:
            return None
        sqrt_n = np.sqrt(self.visits)

        def puct(child):
            # Exploration bonus U shrinks as the child gets visited.
            return child.Q + c_puct * child.P * sqrt_n / (1 + child.visits)

        # max() keeps the first maximum, matching a strict ">" comparison.
        return max(self.children.values(), key=puct)

    def expand(self, action_probs):
        """Create one child per (action, prior) pair.

        Each child board has the current player's mark placed at *action*
        and the player-to-move marker (last cell) flipped.
        """
        mover = self.board[-1]
        next_mover = "O" if mover == "X" else "X"
        for action, prior in action_probs:
            child_board = self.board.copy()
            child_board[action] = mover
            child_board[-1] = next_mover
            self.children[action] = MCTSNode(child_board, prior, self)

    def backpropagate(self, state_value):
        """Walk up to the root updating visits and the running-mean Q.

        The value sign flips at each level because players alternate.
        """
        node, value = self, state_value
        while node is not None:
            node.visits += 1
            node.Q += (value - node.Q) / node.visits
            node, value = node.parent, -value


class AlphaZeroRKPlayer:
    """Tic-tac-toe player: MCTS guided by an RKNN policy/value network.

    Each playout queries the network once at the selected leaf; the
    predicted move priors expand the node and the predicted (or terminal)
    state value is backed up through the tree.
    """

    def __init__(
        self,
        model=None,
        policy_loss_fn=None,
        value_loss_fn=None,
        simulations=1000,
    ):
        # RKNNLite runtime; must expose .inference(inputs=[...]).
        self.model = model
        # Loss functions are unused at inference time; kept for interface parity.
        self.policy_loss_fn = policy_loss_fn
        self.value_loss_fn = value_loss_fn
        # Number of MCTS playouts run per get_action() call.
        self.simulations = simulations

    def playout(self, node):
        """Run one MCTS playout from *node*.

        Select a leaf via PUCT, evaluate it with the network, expand it if
        the game is not over, and back the value up the tree.
        """
        # Selection: descend until a leaf (no children) is reached.
        while node.children:
            node = node.select_child()

        # Encode the whole board (including the trailing player-to-move
        # marker) as X=+1, O=-1, empty=0 for the network.
        state = np.array(
            [1.0 if cell == "X" else -1 if cell == "O" else 0.0 for cell in node.board],
            dtype=np.float32,
        )
        state = np.expand_dims(state, axis=0)
        pred_probs, pred_state_value = self.model.inference(inputs=[state])

        probs = pred_probs[0]
        state_value = pred_state_value[0][0]

        # Priors for legal moves only; board[-1] is the player marker, not a cell.
        availables_action_probs = [
            (i, probs[i]) for i in range(len(node.board) - 1) if node.board[i] == ""
        ]

        win = check_winner(node.board)
        if win is None:
            # Non-terminal: expand with network priors, keep predicted value.
            node.expand(availables_action_probs)
        elif win == "tie":
            state_value = 0.0
        elif win == node.board[-1]:
            # NOTE(review): sign convention taken as-is from the original —
            # verify against the training code that produced the model.
            state_value = -1.0
        else:
            state_value = 1.0

        node.backpropagate(state_value)

    def softmax(self, x):
        """Numerically stable softmax over a 1-D array."""
        probs = np.exp(x - np.max(x))
        probs /= np.sum(probs)
        return probs

    def get_action(self, node=None, board=None):
        """Return the best move index for player "O" on *board*.

        *board* is a flat list of 9 cells ("", "X" or "O"). The *node*
        parameter is accepted for interface compatibility but ignored;
        a fresh search tree is always built.
        """
        # Bug fix: copy the board instead of appending to the caller's list
        # in place (the original leaked an extra "O" element back to the
        # caller after every call).
        search_board = list(board)
        search_board.append("O")  # trailing marker: "O" is the player to move
        root = MCTSNode(search_board)

        for _ in range(self.simulations):
            self.playout(root)

        act_visits = [(action, child.visits) for action, child in root.children.items()]
        print("act_visits: ", act_visits)

        # Play phase: pick the most-visited action.
        # (Dead code removed: a softmax over visit counts was computed into
        # res_probs but never used or returned.)
        move = max(act_visits, key=lambda x: x[1])[0]
        return move


if __name__ == "__main__":
    import time

    # Smoke test: load the model, ask for one move, and time the search.
    rknn_model = load_rknn_model("az.rknn")
    az_player = AlphaZeroRKPlayer(model=rknn_model, simulations=800)

    t0 = time.time()
    chosen_move = az_player.get_action(board=["", "X", "", "", "", "", "", "", ""])
    t1 = time.time()

    print(f"Time taken: {t1 - t0} seconds, result: {chosen_move}")

    rknn_model.release()
