import logging
import math
import os
import numpy as np

import de

from TicTacToe.TicTacToeGame import TicTacToeGame, AverageMeter, dotdict
from .base_tpu import TPU


# Default MCTS configuration. numMCTSSims is the number of simulated
# games run per predict() call; `dotdict` comes from the TicTacToe import above.
args = dotdict({'numMCTSSims': 15})
EPS = 1e-8  # added under the sqrt for unvisited edges so the UCB term is non-zero when Ns[s] == 0

log = logging.getLogger(__name__)

def log_softmax(z, axis=None):
    """Numerically stable log-softmax.

    Subtracting the max before exponentiating avoids overflow for large
    logits; the result is mathematically identical to log(softmax(z)).

    Args:
        z: array-like of logits.
        axis: axis to normalize over. The default ``None`` normalizes over
            the flattened array, matching the original behavior.

    Returns:
        np.ndarray with the same shape as ``z`` containing log-probabilities.
    """
    z = np.asarray(z)
    # keepdims only matters (and only broadcasts correctly) for a real axis;
    # with axis=None both reductions yield scalars, as before.
    keep = axis is not None
    shifted = z - np.max(z, axis=axis, keepdims=keep)
    log_sum_exp = np.log(np.sum(np.exp(shifted), axis=axis, keepdims=keep))
    return shifted - log_sum_exp

def tanh(x):
    """Element-wise hyperbolic tangent; thin wrapper around ``np.tanh``."""
    result = np.tanh(x)
    return result

class PredictMcts(TPU):
    """MCTS move predictor for TicTacToe.

    Runs Monte-Carlo Tree Search from a given board, querying an external
    inference engine (``self.engine``) for a policy/value estimate at each
    leaf node, and returns the most-visited action at the root.
    """

    def __init__(self, cur_player=1):
        """
        Args:
            cur_player: 1 for the first player, -1 for the second; the board
                passed to predict() is multiplied by this value.
        """
        super().__init__()
        # Report which file is running and where (self.pwd comes from the TPU base class).
        file_name = os.path.basename(__file__)
        print(f"{file_name}.py path : {self.pwd}")

        # Game rules: valid moves, terminal detection, win/loss scoring.
        self.game = TicTacToeGame()
        self.args_ = args  # numMCTSSims = number of MCTS simulations per move

        self.engine = None  # inference engine; must be assigned externally before predict()
        self.args = args    # NOTE(review): same shared dict as the module-level `args`
        self.args.cpuct = 1.0  # exploration constant in the UCB formula
        self.Qsa = {}  # Q values for (s, a): expected value of playing a in state s
        self.Nsa = {}  # visit count for edge (s, a)
        self.Ns = {}   # visit count for state s
        self.Ps = {}   # initial policy for s (returned by the neural net)
        self.Es = {}   # cached game.getGameEnded result for state s
        self.Vs = {}   # cached game.getValidMoves mask for state s
        try:
            self.format = de.PixelFormat.DE_PIX_FMT_GRAY8
            # self.format = de.PixelFormat.DE_PIX_FMT_RGB888_PLANE
        except AttributeError:
            # `de` imported but does not expose the expected pixel format.
            print("no module name de")
        self.shape = (1, 1, 3, 3)  # NCHW input shape fed to the engine
        self.cur_player = cur_player

    def set_numMCTSSims(self, numMCTSSims):
        """Set the number of MCTS simulations run per predict() call."""
        self.args.numMCTSSims = numMCTSSims

    def set_cur_player(self, cur_player):
        """Set the current player (1 or -1)."""
        self.cur_player = cur_player

    def predict(self, canonicalBoard):
        """
        Run ``self.args.numMCTSSims`` MCTS simulations starting from
        canonicalBoard and pick the action with the highest visit count.

        Returns:
            The index of the chosen action (ties between equally-visited
            actions are broken uniformly at random).
        """
        board = canonicalBoard * self.cur_player
        for _ in range(self.args.numMCTSSims):
            self.search(board)

        s = self.game.stringRepresentation(board)
        # Root visit counts for every action; 0 for actions never explored.
        counts = [self.Nsa[(s, a)] if (s, a) in self.Nsa else 0
                  for a in range(self.game.getActionSize())]

        # Choose uniformly among the most-visited actions.
        bestAs = np.array(np.argwhere(counts == np.max(counts))).flatten()
        bestA = np.random.choice(bestAs)
        return bestA

    def search(self, canonicalBoard):
        """
        This function performs one iteration of MCTS. It is recursively called
        till a leaf node is found. The action chosen at each node is one that
        has the maximum upper confidence bound as in the paper.

        Once a leaf node is found, the neural network is called to return an
        initial policy P and a value v for the state. This value is propagated
        up the search path. In case the leaf node is a terminal state, the
        outcome is propagated up the search path. The values of Ns, Nsa, Qsa are
        updated.

        NOTE: the return values are the negative of the value of the current
        state. This is done since v is in [-1,1] and if v is the value of a
        state for the current player, then its value is -v for the other player.

        Returns:
            v: the negative of the value of the current canonicalBoard
        """
        s = self.game.stringRepresentation(canonicalBoard)

        if s not in self.Es:
            self.Es[s] = self.game.getGameEnded(canonicalBoard, 1)
        if self.Es[s] != 0:
            # Terminal node: propagate the (negated) game outcome.
            return -self.Es[s]

        if s not in self.Ps:
            # Leaf node: query the engine for a policy and value estimate.
            board = np.array(canonicalBoard).reshape(self.shape)
            # Map board values {-1, 0, 1} to {0, 127, 254} for the GRAY8 input
            # format — presumably matching the engine's training preprocessing
            # (TODO confirm against the training pipeline).
            board = (board + 1) * 127.0
            board = board.astype(np.float32)
            data = [(self.format, self.shape, board)]
            # Engine returns raw policy logits and a raw value estimate.
            output = self.engine.predict(data)
            pi, v = output[0], output[1]
            self.Ps[s], v = np.exp(log_softmax(pi))[0], tanh(v)[0]

            valids = self.game.getValidMoves(canonicalBoard, 1)
            self.Ps[s] = self.Ps[s] * valids  # masking invalid moves
            sum_Ps_s = np.sum(self.Ps[s])
            if sum_Ps_s > 0:
                self.Ps[s] /= sum_Ps_s  # renormalize over the remaining valid moves
            else:
                # If all valid moves were masked, make all valid moves equally
                # probable. NB! This can happen when the NNet architecture is
                # insufficient or overfit; frequent hits here indicate a
                # training problem.
                # log.error("All valid moves were masked, doing a workaround.")
                self.Ps[s] = self.Ps[s] + valids
                self.Ps[s] /= np.sum(self.Ps[s])

            self.Vs[s] = valids  # cache the valid-move mask for this state
            self.Ns[s] = 0
            return -v

        # Internal node already expanded: descend along the best UCB edge.
        valids = self.Vs[s]
        cur_best = -float('inf')
        best_act = -1

        # Pick the action with the highest upper confidence bound.
        for a in range(self.game.getActionSize()):
            if valids[a]:
                if (s, a) in self.Qsa:
                    u = self.Qsa[(s, a)] + self.args.cpuct * self.Ps[s][a] * math.sqrt(self.Ns[s]) / (
                            1 + self.Nsa[(s, a)])
                else:
                    # Unvisited edge: Q defaults to 0; EPS keeps the bound
                    # non-zero when Ns[s] == 0 so the prior still matters.
                    u = self.args.cpuct * self.Ps[s][a] * math.sqrt(self.Ns[s] + EPS)

                if u > cur_best:
                    cur_best = u    # score of the current best action
                    best_act = a    # current best action

        a = best_act
        next_s, next_player = self.game.getNextState(canonicalBoard, 1, a)  # next_player = -1
        next_s = self.game.getCanonicalForm(next_s, next_player)

        # Value of next_s, negated for the current player's perspective.
        v = self.search(next_s)

        if (s, a) in self.Qsa:
            # Running average of all values backed up through edge (s, a);
            # Qsa drives the UCB selection, which drives the final visit counts.
            self.Qsa[(s, a)] = (self.Nsa[(s, a)] * self.Qsa[(s, a)] + v) / (self.Nsa[(s, a)] + 1)
            self.Nsa[(s, a)] += 1
        else:
            self.Qsa[(s, a)] = v
            self.Nsa[(s, a)] = 1

        self.Ns[s] += 1  # one more visit through this state
        return -v
