import logging
import math
import numpy as np
import os
import kp

from TicTacToe.TicTacToeGame import TicTacToeGame, AverageMeter, dotdict
from .base_tpu import TPU

EPS = 1e-8  # tiny constant added under sqrt in the UCB term so unvisited states still score nonzero
args = dotdict({'numMCTSSims': 15})   # default search budget: MCTS simulations per move
log = logging.getLogger(__name__)  # module logger (currently only referenced by commented-out diagnostics)

def log_softmax(z):
    """Log-softmax of ``z``, shifted by max(z) for numerical stability."""
    shifted = z - np.max(z)
    return shifted - np.log(np.sum(np.exp(shifted)))

def tanh(x):
    """Element-wise hyperbolic tangent (thin convenience wrapper over numpy)."""
    return np.tanh(x)

class PredictMcts(TPU):
    """TicTacToe move predictor: AlphaZero-style MCTS whose policy/value
    network is evaluated on a Kneron NPU through the ``kp`` SDK.

    The per-state search tables (Qsa, Nsa, Ns, Ps, Es, Vs) follow the
    notation of the AlphaZero paper.
    """

    def __init__(self, nef_model_id=1):
        """Set up the game rules, the MCTS tables and the NEF model index.

        nef_model_id: index of the model inside the NEF file
                      (0 = detection model, 1 = inference model; default 1).
        """
        super().__init__()
        # Report the running file and its location (self.pwd comes from TPU).
        # splitext keeps the printed name from coming out as "xxx.py.py".
        file_name = os.path.splitext(os.path.basename(__file__))[0]
        print(f"{file_name}.py path : {self.pwd}")

        # TicTacToeGame implements the rules: move validity, terminal
        # detection and winner determination.
        self.game = TicTacToeGame()
        self.args = args
        self.args.cpuct = 1.0  # exploration constant in the UCB formula
        self.Qsa = {}  # Q value of edge (s, a): expected outcome of action a taken in state s
        self.Nsa = {}  # number of times edge (s, a) was traversed
        self.Ns = {}   # number of times state s was visited
        self.Ps = {}   # initial policy for state s (returned by the neural net)
        self.Es = {}   # cached game.getGameEnded(board, 1) result for state s
        self.Vs = {}   # cached game.getValidMoves mask for state s

        # Honour the constructor argument (previously hard-coded to 1,
        # which silently ignored the caller's choice).
        self.nef_model_id = nef_model_id

    def model_init(self, model_path, host=False):
        """Load the NEF model via TPU, then cache input-node metadata."""
        super().model_init(model_path, host)

        # Cache model parameters needed by the preprocessing step.
        self.init_nef_model_config()

    def model_load(self, model_nef_descriptor, device_group):
        """Attach an already-loaded NEF descriptor and device group, then
        cache input-node metadata needed by the preprocessing step."""
        self.model_nef_descriptor = model_nef_descriptor
        self.device_group = device_group
        self.init_nef_model_config()

    def init_nef_model_config(self):
        """Cache quantization and layout parameters of the selected model's
        first input node."""
        input_node = self.model_nef_descriptor.models[self.nef_model_id].input_nodes[0]
        quant = input_node.quantization_parameters.quantized_fixed_point_descriptor_list[0]

        self.model_input_radix = quant.radix
        self.model_input_scale = quant.scale

        # NPU input data layout (expected to be 4W4C8B on KL520).
        self.model_input_data_layout = input_node.data_layout

        # Model input size (shape order: BxCxHxW).
        self.model_input_channel = input_node.shape_npu[1]
        self.model_input_height = input_node.shape_npu[2]
        self.model_input_width = input_node.shape_npu[3]

    def set_numMCTSSims(self, numMCTSSims):
        """Set the number of MCTS simulations performed per predict() call."""
        self.args.numMCTSSims = numMCTSSims

    def set_cur_player(self, cur_player):
        """Record the current player (stored but not read within this class —
        presumably consumed by external callers; confirm)."""
        self.cur_player = cur_player

    def preprogress(self, board):
        """Quantize a 3x3 board into the KL520 NPU input buffer.

        The board values are shifted by +1, scaled by the model's fixed-point
        radix/scale, clipped to int8, then re-laid-out into the KL520
        4W4C8B format and serialized to bytes.
        """
        img = (board + 1).reshape(3, 3, 1)
        data = img.astype(np.int32)

        # The toolchain computed the radix from normalized training data and
        # baked it into the NEF; the NPU divides the input by 2^radix
        # automatically, so we pre-multiply here to compensate.
        data = data * (np.power(2, self.model_input_radix) * self.model_input_scale)
        data = np.round(data)
        data = np.clip(data, -128, 127).astype(np.int8)

        # Re-layout the data to the NPU format.  KL520 supports 4W4C8B only;
        # the hardware requires the '4W' width to be aligned to 16.
        if kp.ModelTensorDataLayout.KP_MODEL_TENSOR_DATA_LAYOUT_4W4C8B == self.model_input_data_layout:
            width_align_base = 16
            channel_align_base = 4
        else:
            # NOTE(review): exiting with status 0 on an error path masks the
            # failure from calling scripts — consider a nonzero exit code.
            print(' - Error: invalid input NPU layout format {}'.format(str(self.model_input_data_layout)))
            exit(0)

        # Width alignment size and number of 4-channel blocks.
        model_input_width_align = width_align_base * math.ceil(self.model_input_width / float(width_align_base))
        model_input_channel_block_num = math.ceil(self.model_input_channel / float(channel_align_base))

        # Destination container; KL520 dimension order is H x C-block x W x 4.
        re_layout_data = np.zeros((self.model_input_height,
                                model_input_channel_block_num,
                                model_input_width_align,
                                channel_align_base), dtype=np.int8)

        # Copy each 4-channel slice of the source data into the container.
        model_input_channel_block_offset = 0
        for model_input_channel_block_idx in range(model_input_channel_block_num):
            model_input_channel_block_offset_end = model_input_channel_block_offset + channel_align_base
            model_input_channel_block_offset_end = model_input_channel_block_offset_end if model_input_channel_block_offset_end < self.model_input_channel else self.model_input_channel

            re_layout_data[:self.model_input_height,
                        model_input_channel_block_idx,
                        :self.model_input_width,
                        :(model_input_channel_block_offset_end - model_input_channel_block_offset)] = \
                data[:, :, model_input_channel_block_offset:model_input_channel_block_offset_end]

            model_input_channel_block_offset += channel_align_base

        # Flatten to the raw byte buffer the NPU inference API expects.
        npu_input_buffer = re_layout_data.tobytes()
        return npu_input_buffer

    def send_data_and_get_result(self, npu_input_buffer):
        """Send one preprocessed buffer to the NPU and return ``(pi, v)``.

        Returns:
            pi: raw policy logits reshaped to (1, 10) — assumed to match
                game.getActionSize(); confirm against the game class.
            v:  raw value output reshaped to (1, 1).
        """
        # Prepare the generic data inference input descriptor.
        generic_inference_input_descriptor = kp.GenericDataInferenceDescriptor(
            model_id=self.model_nef_descriptor.models[self.nef_model_id].id,
            inference_number=0,
            input_node_data_list=[kp.GenericInputNodeData(buffer=npu_input_buffer)]
        )

        # Run inference: send, then block on the result.
        try:
            kp.inference.generic_data_inference_send(device_group=self.device_group,
                                                    generic_inference_input_descriptor=generic_inference_input_descriptor)

            generic_raw_result = kp.inference.generic_data_inference_receive(device_group=self.device_group)
        except kp.ApiKPException as exception:
            # NOTE(review): exits with status 0 on failure — consider nonzero.
            print(' - Error: inference failed, error = {}'.format(exception))
            exit(0)

        # Retrieve every output node as float data in CHW ordering.
        inf_node_output_list = []
        for node_idx in range(generic_raw_result.header.num_output_node):
            inference_float_node_output = kp.inference.generic_inference_retrieve_float_node(node_idx=node_idx,
                                                                                            generic_raw_result=generic_raw_result,
                                                                                            channels_ordering=kp.ChannelOrdering.KP_CHANNEL_ORDERING_CHW)
            inf_node_output_list.append(inference_float_node_output)

        pi = inf_node_output_list[0].ndarray.reshape(1, 10)
        v = inf_node_output_list[1].ndarray.reshape(1, 1)
        return pi, v

    def predict(self, canonicalBoard):
        """Run ``numMCTSSims`` MCTS simulations from ``canonicalBoard`` and
        return the index of the most-visited root action (greedy, i.e.
        temperature -> 0; ties broken uniformly at random).

        Note: despite the AlphaZero-style wording elsewhere, this returns a
        single action index, not a probability vector.
        """
        for _ in range(self.args.numMCTSSims):
            self.search(canonicalBoard)

        s = self.game.stringRepresentation(canonicalBoard)
        # Visit count of every action from the root state.
        counts = [self.Nsa[(s, a)] if (s, a) in self.Nsa else 0 for a in range(self.game.getActionSize())]
        print("counts:", counts)

        # One-hot over the (randomly tie-broken) most-visited action.
        bestAs = np.array(np.argwhere(counts == np.max(counts))).flatten()
        bestA = np.random.choice(bestAs)
        probs = [0] * len(counts)
        probs[bestA] = 1

        # argmax over the one-hot vector recovers bestA.
        action = np.argmax(probs)
        return action

    def search(self, canonicalBoard):
        """Perform one MCTS iteration starting from ``canonicalBoard``.

        Recurses until a leaf (unexpanded) or terminal state is reached.
        At a leaf the NPU is queried for an initial policy P and value v;
        at a terminal state the game outcome is used instead.  The value is
        propagated back up the search path, updating Ns, Nsa and Qsa.

        Returns:
            The NEGATIVE of the value of ``canonicalBoard``: v is in [-1, 1]
            from the current player's perspective, so it is worth -v to the
            opponent one ply up.
        """
        s = self.game.stringRepresentation(canonicalBoard)

        if s not in self.Es:
            self.Es[s] = self.game.getGameEnded(canonicalBoard, 1)  # terminal test
        if self.Es[s] != 0:
            # Terminal node: propagate the game outcome.
            return -self.Es[s]

        if s not in self.Ps:
            # Leaf node: ask the NPU for a policy and value estimate.
            board = canonicalBoard
            npu_input_buffer = self.preprogress(board)
            pi, v = self.send_data_and_get_result(npu_input_buffer)
            # The NEF model emits raw outputs; apply softmax/tanh on the host.
            self.Ps[s], v = np.exp(log_softmax(pi))[0], tanh(v)[0]

            valids = self.game.getValidMoves(canonicalBoard, 1)
            self.Ps[s] = self.Ps[s] * valids  # masking invalid moves
            sum_Ps_s = np.sum(self.Ps[s])
            if sum_Ps_s > 0:
                self.Ps[s] /= sum_Ps_s  # renormalize over the remaining legal moves
            else:
                # All valid moves were masked: fall back to a uniform policy
                # over the legal moves.  If this happens often, the network
                # architecture and/or training process deserves attention.
                # log.error("All valid moves were masked, doing a workaround.")
                self.Ps[s] = self.Ps[s] + valids
                self.Ps[s] /= np.sum(self.Ps[s])

            self.Vs[s] = valids  # remember the legal-move mask for this state
            self.Ns[s] = 0
            return -v

        # Already-expanded node: descend along the action with the highest
        # upper confidence bound.
        valids = self.Vs[s]
        cur_best = -float('inf')
        best_act = -1

        for a in range(self.game.getActionSize()):
            if valids[a]:
                if (s, a) in self.Qsa:
                    u = self.Qsa[(s, a)] + self.args.cpuct * self.Ps[s][a] * math.sqrt(self.Ns[s]) / (
                            1 + self.Nsa[(s, a)])
                else:
                    # Unvisited edge: Q defaults to 0; EPS (1e-8) keeps the
                    # exploration term nonzero even when Ns[s] == 0.
                    u = self.args.cpuct * self.Ps[s][a] * math.sqrt(self.Ns[s] + EPS)

                if u > cur_best:
                    cur_best = u    # best UCB score so far
                    best_act = a    # best action so far

        a = best_act
        next_s, next_player = self.game.getNextState(canonicalBoard, 1, a)  # next_player = -1
        next_s = self.game.getCanonicalForm(next_s, next_player)

        # Value of the child state from our perspective (already negated).
        v = self.search(next_s)

        if (s, a) in self.Qsa:
            # Running average of the values backed up through this edge;
            # Qsa steers future UCB selection, which shapes the final counts.
            self.Qsa[(s, a)] = (self.Nsa[(s, a)] * self.Qsa[(s, a)] + v) / (self.Nsa[(s, a)] + 1)
            self.Nsa[(s, a)] += 1
        else:
            self.Qsa[(s, a)] = v
            self.Nsa[(s, a)] = 1

        self.Ns[s] += 1  # one more visit through this state
        return -v
