
import numpy as np
import torch
import copy
from numba import jit
from torch.distributions.dirichlet import Dirichlet
#


@jit(nopython=True)
def softmax(arr, temp):
    """Temperature-scaled softmax over `arr`.

    Equivalent to normalizing arr ** (1/temp): takes log(arr + 1e-10),
    scales by 1/temp, then applies a numerically-stable softmax
    (max-shifted before exponentiation). Small temp sharpens the
    distribution toward the argmax.
    """
    logits = (1. / temp) * np.log(arr + 1e-10)
    exps = np.exp(logits - np.max(logits))
    return exps / np.sum(exps)


@jit(nopython=True)
def get_value(c_value, p_n_visits, n_visits, Q, p):
    """PUCT-style node score: exploitation term Q plus a UCB exploration bonus.

    :param c_value: exploration constant (c_puct)
    :param p_n_visits: parent's visit count
    :param n_visits: this node's visit count
    :param Q: this node's mean backed-up value
    :param p: prior probability of the action leading to this node

    NOTE(review): the denominator is n_visits + 1e-10 rather than the
    conventional n_visits + 1, so an unvisited child gets an enormous
    bonus — presumably to force every child to be visited once; confirm.
    """
    bonus = p * c_value * np.sqrt(p_n_visits) / (n_visits + 1e-10)
    return Q + bonus


class Node():
    """A node in the MCTS search tree.

    Stores the edge prior `p`, the visit count `visit_n`, and the running
    mean of backed-up leaf values `Q`. Children are keyed by the action's
    scalar value (a float obtained via ``tensor.item()``).
    """

    def __init__(self, parent, action_prob):
        self.parent = parent        # parent Node, or None for the root
        self.children = {}          # {action_value(float): Node}
        self.u = 0                  # exploration bonus slot (kept for compatibility; unused here)
        self.Q = 0                  # running mean of leaf values backed up through this node
        self.visit_n = 0            # number of times this node was visited
        self.p = action_prob        # prior probability of the action leading here

    def get_value(self, c_puct):
        """Return this node's PUCT score (Q + exploration bonus).

        Delegates to the module-level jitted ``get_value`` helper.
        :param c_puct: exploration constant balancing exploration/exploitation
        """
        return get_value(c_puct, self.parent.visit_n, self.visit_n, self.Q, self.p)

    def select(self, c_puct):
        """Pick the child with the highest PUCT score.

        :param c_puct: exploration constant
        :return: (action, child_node) of the best-scoring child
        """
        return max(self.children.items(),
                   key=lambda act_node: act_node[1].get_value(c_puct))

    def expand(self, action_probs_zip):
        """Create child nodes for not-yet-expanded actions.

        :param action_probs_zip: iterable of (action, prior_prob) 0-dim tensor pairs
        """
        for act, act_prob in action_probs_zip:
            key = act.item()
            # BUG FIX: previously tested `act not in self.children` with the
            # raw tensor, which never matches the float keys (tensor hashing
            # is identity-based) and would clobber existing children.
            if key not in self.children:
                self.children[key] = Node(self, act_prob.item())

    def update(self, leaf_value):
        """Incrementally update the visit count and the running mean Q."""
        self.visit_n += 1
        self.Q += 1.0 * (leaf_value - self.Q) / self.visit_n

    def update_recursive(self, leaf_value):
        """Back up `leaf_value` along the path to the root (ancestors first)."""
        if self.parent:  # update from the parent downwards
            self.parent.update_recursive(leaf_value)
        self.update(leaf_value)

    def is_leaf(self):
        """True when this node has no expanded children."""
        return not self.children

    def is_root(self):
        """True when this node has no parent.

        BUG FIX: the original assigned ``self.parent = None`` (detaching the
        node!) instead of returning a boolean.
        """
        return self.parent is None


class MCTS():
    """Monte-Carlo tree search driver.

    Repeatedly simulates playouts from the current root and converts the
    resulting child visit counts into move probabilities.
    """

    def __init__(self, policy, c_puct, n_playout):
        self.policy = policy            # callable: state -> (zip of (action, prob), value)
        self.root = Node(None, 1.)
        self.c_puct = c_puct            # exploration constant
        self.n_rollout = n_playout      # simulations per move
        # NOTE(review): aliases the initial root object — resetting to it
        # later keeps all previously accumulated statistics; confirm intended.
        self.base_root = self.root

    def _playout(self, srq):
        """Run a single simulation: select down to a leaf, expand/evaluate, back up.

        :param srq: mutable problem state; moves are applied in place
        """
        # Selection: descend by PUCT score, applying each chosen move to srq.
        node = self.root
        while not node.is_leaf():
            action, node = node.select(self.c_puct)
            srq.do_move(action)

        # Expansion / evaluation: at a terminal state use the true target,
        # otherwise the policy network's value estimate, expanding the leaf.
        end, target = srq.end_optimization()
        if end:
            leaf_value = target
        else:
            action_probs_zip, leaf_value = self.policy(srq)
            node.expand(action_probs_zip)

        # Backup: propagate the leaf value along the visited path.
        node.update_recursive(leaf_value)

    def get_mcts_move_prob(self, srq, temp=1e-3):
        """Run all playouts and return available actions with their probabilities.

        :param srq: current problem state (left untouched; playouts use deep copies)
        :param temp: temperature parameter in (0, 1] controlling exploration
        :return: (available action values, softmax over child visit counts)

        Only actions actually reached by the search appear in the root's children.
        """
        for _ in range(self.n_rollout):
            # Deep-copy so every playout starts from the same, independent state.
            self._playout(copy.deepcopy(srq))

        acts = srq.avaliable_move_value
        visit_counts = [child.visit_n for child in self.root.children.values()]
        child_visits = torch.tensor(visit_counts, dtype=torch.float32)
        # NOTE(review): `temp` is currently unused — this is a plain softmax over
        # raw visit counts, not the temperature-scaled version; confirm intended.
        act_probs = torch.softmax(child_visits, dim=0)
        return acts, act_probs

    def update_with_move(self, last_move):
        """Advance the root to the chosen child, or fall back to the saved base root.

        :param last_move: action value of the move just played
        """
        child = self.root.children.get(last_move)
        if child is not None:
            self.root = child
        else:
            self.root = self.base_root


class MCTS_player():
    """Agent that chooses moves by running MCTS from the current state."""

    def __init__(self, policy, c_puct=5, n_playout=10, training=True):
        self.mcts = MCTS(policy, c_puct, n_playout)
        self.training = training  # when True, Dirichlet noise is mixed into sampling

    def get_action(self, srp, temp=1e-3, return_prob=1):
        """Sample the next move from the MCTS visit distribution.

        :param srp: problem state exposing last_move_idx / dim_num
        :param temp: temperature forwarded to the search
        :param return_prob: when truthy, also return the action probabilities
        :return: (move, act_probs) if return_prob else move; None when all
                 optimization parameters have already been chosen
        """
        if srp.last_move_idx >= srp.dim_num:
            print("WARNING: 所有优化参数均已选取完毕")
            return

        # acts: action values; act_probs: matching probabilities (torch tensor).
        acts, act_probs = self.mcts.get_mcts_move_prob(srp, temp)

        if self.training:
            # Self-play: mix in Dirichlet noise to boost exploration.
            noise = Dirichlet(0.3 * torch.ones(len(act_probs))).sample()
            sample_probs = 0.75 * act_probs + 0.25 * noise
        else:
            sample_probs = act_probs

        move = acts[torch.multinomial(sample_probs, 1).item()]
        self.mcts.update_with_move(move)

        if return_prob:
            # move: chosen value for the next dimension;
            # act_probs: probabilities over the searched actions.
            return move, act_probs
        return move


def policy(srq):
    """Baseline policy: softmax over the legal move values, constant value estimate.

    :param srq: problem state exposing get_avaliable_move() and current_state()
    :return: (zip of (action, prob) pairs, scalar value estimate)
    """
    _, legal_value = srq.get_avaliable_move()  # depth is not needed here
    # NOTE(review): return value unused; call kept in case it mutates srq — confirm.
    srq.current_state()
    probs = torch.softmax(legal_value, dim=0)
    value = 1000  # placeholder value estimate for non-terminal states
    return zip(legal_value, probs), value


if __name__ == '__main__':
    # Smoke-test entry point: play one optimization "game" with the baseline
    # policy defined above (project-local `game` module supplies the env).
    from game import Game, Same_range_problems
    p = policy
    player = MCTS_player(policy)
    srp = Same_range_problems()
    game = Game(srp)
    game.start_play(player)
