# -*- coding: utf-8 -*-
"""
Monte Carlo Tree Search in AlphaGo Zero style, which uses a policy-value
network to guide the tree search and evaluate the leaf nodes

@author: Junxiao Song
"""

import numpy as np
import copy

# Numerically stable softmax helper.
def softmax(x):
    """Return the softmax of `x` as a probability distribution.

    Subtracting the maximum before exponentiating keeps the computation
    numerically stable for large inputs.
    """
    shifted = x - np.max(x)
    exps = np.exp(shifted)
    return exps / np.sum(exps)

# A single node of the search tree.
class TreeNode(object):
    """A node in the MCTS tree.

    Each node keeps track of its own mean action value Q, the prior
    probability P assigned when its parent was expanded, and a
    visit-count-adjusted exploration bonus u (PUCT-style score).
    """

    def __init__(self, parent, prior_p):
        self._parent = parent    # parent TreeNode, or None for the root
        self._children = {}      # maps action -> child TreeNode
        self._n_visits = 0       # number of times this node was visited
        self._Q = 0              # running mean of backed-up leaf values
        self._u = 0              # exploration bonus, refreshed in get_value()
        self._P = prior_p        # prior probability of reaching this node

    def expand(self, action_priors):
        """Create a child node for each (action, prior probability) pair.

        Actions that already have a child are left untouched.
        """
        for action, prob in action_priors:
            if action in self._children:
                continue
            self._children[action] = TreeNode(self, prob)

    def select(self, c_puct):
        """Pick the child with the highest score Q + u.

        Returns a tuple of (action, child node). On ties the child that
        was inserted first wins.
        """
        best_action = None
        best_child = None
        best_score = -float('inf')
        for action, child in self._children.items():
            score = child.get_value(c_puct)
            if score > best_score:
                best_action, best_child, best_score = action, child, score
        return best_action, best_child

    def update(self, leaf_value):
        """Fold one leaf evaluation into this node's statistics.

        leaf_value: value of the subtree evaluation from the current
            player's perspective.
        """
        self._n_visits += 1
        # Incremental mean: Q_new = Q_old + (value - Q_old) / n.
        self._Q += (leaf_value - self._Q) / self._n_visits

    def update_recursive(self, leaf_value):
        """Like update(), but applied to every ancestor as well.

        The sign flips at each level because parent and child belong to
        opposing players.
        """
        if self._parent is not None:
            self._parent.update_recursive(-leaf_value)
        self.update(leaf_value)

    def get_value(self, c_puct):
        """Return this node's selection score Q + u.

        u grows with the prior P and the parent's visit count, and shrinks
        as this node itself is visited more — a UCT variant.
        c_puct: a number in (0, inf) balancing the value estimate Q against
            the prior-driven exploration term.
        """
        exploration = np.sqrt(self._parent._n_visits) / (1 + self._n_visits)
        self._u = c_puct * self._P * exploration
        return self._Q + self._u

    def is_leaf(self):
        """True if no child has been expanded below this node."""
        return not self._children

    def is_root(self):
        """True if this node has no parent."""
        return self._parent is None

# MCTS
class MCTS(object):
    """Monte Carlo Tree Search guided by a policy-value function."""

    def __init__(self, policy_value_fn, c_puct=5, n_playout=10000):
        """
        policy_value_fn: callable taking a board state and returning
            ([(action, prior_prob), ...], leaf_value) where leaf_value is
            in [-1, 1] from the current player's perspective.
        c_puct: a number in (0, inf) controlling how quickly exploration
            converges to the maximum-value policy; higher means leaning
            harder on the prior probabilities.
        n_playout: number of simulations run per call to get_move_probs.
        """
        self._root = TreeNode(None, 1.0)   # fresh tree root
        self._policy = policy_value_fn     # policy-value network function
        self._c_puct = c_puct              # exploration constant
        self._n_playout = n_playout        # simulations per move

    def _playout(self, state):
        """Run one simulation: descend to a leaf, evaluate it, back up.

        `state` is mutated in place, so callers must pass in a copy.
        """
        node = self._root
        # Greedily follow the child maximizing Q + u until a leaf.
        while not node.is_leaf():
            action, node = node.select(self._c_puct)
            state.do_move(action)

        # Ask the network for move priors and a value in [-1, 1] for the
        # player to move at the leaf.
        action_probs, leaf_value = self._policy(state)

        end, winner = state.game_end()
        if end:
            # Terminal position: replace the estimate with the true score.
            if winner == -1:  # tie
                leaf_value = 0.0
            else:
                leaf_value = (
                    1.0 if winner == state.get_current_player() else -1.0
                )
        else:
            node.expand(action_probs)

        # Back up through the traversal; negated because each parent is
        # the opposing player's node.
        node.update_recursive(-leaf_value)

    def get_move_probs(self, state, temp=1e-3):
        """Run all playouts and return the moves with their probabilities.

        state: the current game state (deep-copied for each playout).
        temp: temperature in (0, 1] controlling exploration — the
            probabilities come from root visit counts softened by 1/temp.
        """
        for _ in range(self._n_playout):
            self._playout(copy.deepcopy(state))

        # Convert root visit counts into a move distribution.
        children = self._root._children
        acts = tuple(children.keys())
        visits = tuple(child._n_visits for child in children.values())
        act_probs = softmax(1.0/temp * np.log(np.array(visits) + 1e-10))

        return acts, act_probs

    def update_with_move(self, last_move):
        """Advance the root to the child for `last_move`, keeping that
        subtree; any other move discards the tree and starts fresh.
        """
        child = self._root._children.get(last_move)
        if child is not None:
            child._parent = None
            self._root = child
        else:
            self._root = TreeNode(None, 1.0)

    def __str__(self):
        return "MCTS"

# MCTS-based player.
class MCTSPlayer(object):
    """AI player that chooses moves via MCTS."""

    def __init__(self, policy_value_function,
                 c_puct=5, n_playout=2000, is_selfplay=0):
        self.mcts = MCTS(policy_value_function, c_puct, n_playout)
        self._is_selfplay = is_selfplay

    def set_player_ind(self, p):
        # Record which side (player index) this agent plays.
        self.player = p

    def reset_player(self):
        # -1 never matches a real move, so this rebuilds a fresh tree.
        self.mcts.update_with_move(-1)

    def get_action(self, board, temp=1e-3, return_prob=0):
        """Pick a move for `board`.

        Returns the chosen move, or (move, move_probs) when return_prob is
        truthy, where move_probs is the full pi vector over the board as in
        the AlphaGo Zero paper. Prints a warning and returns None if no
        move is available.
        """
        sensible_moves = board.availables  # legal moves, e.g. [0, 1, ..., n]
        move_probs = np.zeros(board.width * board.height)
        if len(sensible_moves) == 0:
            print("WARNING: the board is full")
            return

        acts, probs = self.mcts.get_move_probs(board, temp)
        move_probs[list(acts)] = probs
        if self._is_selfplay:
            # Mix in Dirichlet noise at the root — needed for exploration
            # during self-play training.
            noise = np.random.dirichlet(0.3 * np.ones(len(probs)))
            move = np.random.choice(acts, p=0.75 * probs + 0.25 * noise)
            # Keep the subtree under the chosen move for reuse.
            self.mcts.update_with_move(move)
        else:
            # With the default temp=1e-3 this is almost argmax sampling.
            move = np.random.choice(acts, p=probs)
            # Discard the tree between moves.
            self.mcts.update_with_move(-1)

        return (move, move_probs) if return_prob else move

    def __str__(self):
        return "MCTS {}".format(self.player)
