import datetime
import math
import random


class Node(object):
    """A single node of a Monte Carlo search tree."""

    type: str

    def __init__(self, state, state_after_action, action=None, type='max', parent=None):
        # Game state held by this node.
        self.state = state

        # Whether this node plays the 'max' or the 'min' role.
        self.type = type

        # Opposite role; every child created by append_child gets this type.
        self.anti_type = 'min' if type == 'max' else 'max'

        # How many times the search has visited this node.
        self.visit = 0

        # Accumulated reward score of this node.
        self.reward = 0

        # Cached list of legal actions; filled in lazily by the search.
        self.available_action = None

        # Child nodes.
        self.children = []

        # Parent node (None for the root).
        self.parent = parent

        # Action that led from the parent to this node.
        self.action = action

        # User-supplied transition function.  Called as
        # state_after_action(state=..., action=..., type=...) and returns the
        # state reached after taking `action`.
        self.state_after_action = state_after_action

    def append_child(self, action=None, board=None):
        """Create, attach and return a child node.

        When `board` is given it is used directly as the child's state;
        otherwise the state is derived through `state_after_action`.
        The child records `action`, flips the node type, and points back
        to this node as its parent.
        """
        if board is None:
            child_state = self.state_after_action(state=self.state, action=action, type=self.type)
        else:
            child_state = board
        child = Node(state=child_state,
                     state_after_action=self.state_after_action,
                     type=self.anti_type,
                     action=action,
                     parent=self)
        self.children.append(child)
        return child

# UCB1 upper-confidence bound.
# Q: total reward, N: node visit count, t: total simulation count, C: exploration constant.
def UCB(Q, N, t, C):
    """Return the UCB1 score ``Q/N + C*sqrt(2*ln(t)/N)``.

    An unvisited node (``N == 0``) scores ``+inf`` so it is always selected
    before any visited sibling, instead of raising ZeroDivisionError.
    """
    if N == 0:
        return float('inf')
    return Q / N + C * math.sqrt(2 * math.log(t) / N)

# Monte Carlo search tree
class Monte_tree(object):
    """Monte Carlo tree search (UCT) driven by user-supplied game callbacks.

    The caller provides:
      - ``find_all_next(current)``: list of legal actions at node ``current``
        (``None`` or an empty list means the state is terminal),
      - ``state_after_action(state, action, type)``: successor state,
      - ``get_reward(current)``: reward of ``current`` taken as a terminal state.
    """

    def __init__(self, ori_state=None, find_all_next=None, state_after_action=None, get_reward=None, C=0.7, type=None):
        # Root node of the tree.
        self.root = [Node(type=type, state=ori_state, state_after_action=state_after_action)]

        # find_all_next(current) yields every legal action from `current`;
        # it must be defined by the user of this class.
        self.find_all_next = find_all_next

        # get_reward(current) scores `current` as a terminal state;
        # it must be defined by the user of this class.
        self.get_reward = get_reward

        # Total number of completed simulations.
        self.all_visit = 0

        # Exploration constant for UCB.
        self.C = C

        # `currents` holds the single node the traversal is currently at;
        # the cursor starts at the root.
        self.currents = [self.root[0]]

    def back_to_root(self):
        """Reset the traversal cursor to the root node."""
        self.currents = [self.root[0]]

    def front_simulator(self):
        """Advance the cursor one ply: expand a new child, or descend by UCB.

        Returns None when the current state is terminal, True otherwise.
        """
        current = self.currents[0]

        # Compute (and cache) the legal actions of the current node.
        if current.available_action is None:
            current.available_action = self.find_all_next(current)
        all_next = current.available_action

        # No legal action: the game is over.  An empty list is treated the
        # same as None so terminal states cannot crash the expansion step
        # below (the original code only checked `is None`).
        if not all_next:
            return None

        # Indices into all_next whose corresponding child already exists.
        exist_next_ind = [all_next.index(child.action)
                          for child in current.children
                          if child.action in all_next]
        # Indices into all_next that have not been expanded yet.
        other_next_ind = [i for i in range(len(all_next)) if i not in exist_next_ind]

        if other_next_ind:
            # Expansion: while untried actions remain, pick one uniformly at
            # random and attach a child for it.  (Expansion always takes
            # priority over descent, matching the original policy.)
            action = all_next[random.choice(other_next_ind)]
            self.currents = [current.append_child(action=action)]
        else:
            # Selection: every action has a child (other_next_ind is empty),
            # so score the children directly and descend into the one with
            # the highest UCB value.
            UCBs = [UCB(c.reward, c.visit, self.all_visit, self.C) for c in current.children]
            self.currents = [current.children[UCBs.index(max(UCBs))]]

        return True

    def back_propagation(self):
        """Propagate the terminal reward from the cursor back to the root.

        Visit counts and signed rewards are updated on every node strictly
        below the root; the cursor ends at the root afterwards.
        """
        reward = self.get_reward(self.currents[0])

        while self.currents[0].parent is not None:
            node = self.currents[0]
            node.visit += 1
            # Max nodes accumulate +reward, min nodes -reward.
            node.reward += reward if node.type == 'max' else -reward
            # Step up to the parent.
            self.currents = [node.parent]

    def UCTSearch_once(self):
        """Run one full simulation from the root and back-propagate it."""
        self.currents = [self.root[0]]
        # Descend/expand until front_simulator reports a terminal state.
        while self.front_simulator() is not None:
            pass
        self.back_propagation()
        self.all_visit += 1
        return True

    def UCTSearch(self, time_limit):
        """Run simulations for `time_limit` seconds, then return the action
        chosen by one more selection step from the root."""
        start_time = datetime.datetime.now()
        while True:
            self.UCTSearch_once()

            # total_seconds() (rather than the `.seconds` component, which
            # wraps at one day and truncates fractions) respects any limit.
            if (datetime.datetime.now() - start_time).total_seconds() >= time_limit:
                break

        # UCTSearch_once leaves the cursor at the root, so one simulator
        # step selects the move to play.  NOTE(review): this step may still
        # *expand* an untried action rather than pick the best-scored child;
        # preserved from the original behavior — confirm it is intended.
        self.front_simulator()
        return self.currents[0].action
