import numpy as np
import copy
from utilities import *
import sys
sys.path.append('../')
from board_fast import Board,Game,Player,Point,Move
from anytree import AnyNode,search,LevelOrderIter,RenderTree,Node,Walker
import math
import random
from utility.keras_modal import DenseModel
import pathlib
import os

# After each simulation the full update sequence is:
#   1: compute V (blend value-net estimate with rollout result)
#   2: compute Q_v (mean value)
#   3: compute the exploration utility
#   4: compute T (final selection score)
def utility(Np, N, P, Cu=5):  # exploration utility (PUCT-style bonus)
    """Exploration bonus for a tree node.

    Args:
        Np: visit count of the parent node.
        N:  visit count of this node.
        P:  prior probability of the move (from the policy network).
        Cu: exploration constant; defaults to 5, the previously hard-coded value.

    Returns:
        Cu * sqrt(Np) * P / (1 + N) — grows with parent visits, shrinks as
        this node is visited more.
    """
    return Cu * math.sqrt(Np) * (P / (1 + N))

def T(Q, utility):  # final selection score
    """Return the node's final selection score: mean value plus exploration bonus."""
    score = Q + utility
    return score

def Q_v(V, N):  # mean value of a node
    """Return the average value V/N.

    V is already the running sum accumulated over the simulations, so a
    single division yields the mean.
    """
    mean_value = V / N
    return mean_value

def V(q, rollout, lamda=0.5):  # computed after every simulation
    """Blend the value-network estimate with the rollout outcome.

    Args:
        q: value predicted by the value network for this node.
        rollout: result of the random rollout (+1 win / -1 loss / 0 draw).
        lamda: mixing weight; defaults to 0.5, the previously hard-coded value.

    Returns:
        lamda * q + (1 - lamda) * rollout
    """
    return lamda * q + (1 - lamda) * rollout

class tree:
    """Monte-Carlo search tree built on anytree nodes.

    Each child node carries: N (visit count), P (prior probability),
    q (lazily-computed value-net estimate, may be None), V (accumulated
    value), T (selection score), plus the move and player that produced it.
    Priors/values are stored from the root player's point of view.
    """
    def __init__(self, player):
        # Root only needs a visit count and the player to move.
        self.root = AnyNode(N=1, player=player)

    def getNodeLeaves(self, node_start):
        """Return the direct children of node_start; [] means no children."""
        # maxlevel=2 limits iteration to the node and its children; the
        # filter drops the start node itself.
        return [node for node in LevelOrderIter(node_start, filter_=lambda n: n is not node_start, maxlevel=2)]

    def addLeaves(self, parent, q, P, N, move, player):
        """Attach a new child node for `move` under `parent`.

        For opponent nodes the prior and value estimate are negated so all
        scores stay relative to the root player.  `q` may be None (it is
        computed lazily on first visit), in which case it must survive
        unchanged — negating None would raise TypeError.
        """
        if player != self.root.player:
            P = -P
            if q is not None:  # bug fix: guard the sign flip for lazy q
                q = -q
        initial_score = P / (1 + N)  # selection score before any visit
        AnyNode(parent=parent, q=q, P=P, N=N, V=0, T=initial_score, move=move, player=player)

    def findMax(self, nodes):
        """Select the child with the largest |T| and record the visit."""
        best = max(nodes, key=lambda n: abs(n.T))
        best.N += 1
        return best

    def findMove(self):
        """Return the most-visited move among the root's children, or None."""
        children = self.getNodeLeaves(self.root)
        if not children:
            return None
        return max(children, key=lambda n: n.N).move

def update_nodes(node, root, winner):
    """Back-propagate one simulation outcome along the path from `node` to `root`.

    For every node on the walk (root excluded): the rollout reward is +1 if
    that node's player is the winner, -1 if the opponent won, and stays 0
    when `winner` is None (draw / unfinished rollout).  Each node's
    accumulated value V, mean value Q, exploration utility and selection
    score T are then refreshed.
    """
    walker = Walker()
    # walk() returns (upwards, common, downwards); index 0 is the upward leg.
    path = walker.walk(node, root)
    rollout = 0  # remains 0 for the whole path when there is no winner
    for cur in path[0]:
        if cur is root:
            continue
        if winner is not None:
            if cur.player == winner:
                rollout = 1
            elif cur.player.other() == winner:
                rollout = -1
        value = V(cur.q, rollout)
        cur.V += value
        mean_q = Q_v(cur.V, cur.N)
        bonus = utility(cur.parent.N, cur.N, cur.P)
        cur.T = T(mean_q, bonus)

class ai_bot:
    """Go-playing bot: three neural networks driving a Monte-Carlo tree search.

    model_s: simple (fast) policy network used for random rollouts.
    model_c: complicated policy network used to expand tree nodes.
    model_v: value network scoring individual moves (computed lazily).
    """
    def __init__(self,boardSize=9,rand=False):
        self.model_s=DenseModel(boardSize=boardSize,model='alpha_simple')
        self.model_c=DenseModel(boardSize=boardSize,model='alpha_complicate')
        self.model_v=DenseModel(boardSize=boardSize,model='alpha_value')
        self.boardSize=boardSize
        # Moves already returned this turn; lets predict(reChoose=True) avoid repeats.
        self.moves=frozenset()

    def load_weights(self,weight_s,weight_c,weight_v):
        """Load weight files into the simple-policy, complicated-policy and value models."""
        self.model_s.model_load_weights(weight_s)
        self.model_c.model_load_weights(weight_c)
        self.model_v.model_load_weights(weight_v)

    def isLegal(self,point,board,player):    # rejects filling one's own eyes and suicide moves
        """Return False when `point` is an eye of `player`'s or a suicide move, else True."""
        neighbours=board.get_neighbors(point)
        is_eye=True
        is_self_captured=True
        the_other_strings=[]
        own_strings=[]
        liberties=[]
        # Eye check: every neighbour must be an own stone whose string keeps >1 liberty.
        for i in neighbours:
            if board.grid.get(i)==None:
                is_eye=False
                break
            elif board.grid.get(i).player != player:
                is_eye=False
                break
            elif len(board.grid.get(i).liberties)<=1:
                is_eye=False
                break
            else:
                pass
        # Suicide check: classify neighbours into own strings / enemy strings / empty liberties.
        for i in neighbours:
            go_string_t=board.grid.get(i)
            if go_string_t is not None: # the point is occupied
                if go_string_t.player == player:    # by an own stone
                    if go_string_t not in own_strings:
                        own_strings.append(go_string_t)
                else:
                    if go_string_t not in the_other_strings:
                        the_other_strings.append(go_string_t)
            else:   # empty point, i.e. a liberty of the placed stone
                liberties.append(i)
        if len(liberties) == 0:
            if any(len(str.liberties) == 1 for str in the_other_strings):   # some enemy string was in atari before we play: the move captures, so it is not suicide
                is_self_captured=False
            elif all(len(str.liberties) == 1 for str in own_strings):
                None    # suicide: every adjacent own string would be left without liberties
            else:
                is_self_captured=False
        else:
            is_self_captured=False
        if is_eye or is_self_captured:
            return False
        return True

    def dl_mc(self,player,board,mask,depth=10,rounds=1000,s_depth=180,m_rounds=2):
        """Monte-Carlo tree search: pick a move for `player` on `board`.

        Args:
            mask: 0/1 array of points that are legal in the root position.
            depth: plies walked with the complicated policy + value networks.
            rounds: number of MCTS iterations.
            s_depth: maximum plies of each fast random rollout.
            m_rounds: rollouts per reached leaf.
        Returns:
            The chosen Point, or None when the tree produced no move.

        NOTE(review): depends on a module-level global `board_his`; also,
        rollouts restart from the original `board` (see the deepcopy below),
        not from the board reached after the tree walk — confirm intended.
        """
        mc_tree=tree(player)
        boardsize=board.size
        for _ in range(rounds):
            mask_now=mask.flatten()
            node_now=mc_tree.root
            player_now=player
            board_now=copy.deepcopy(board)
            global board_his
            boardhis=copy.deepcopy(board_his)   # every simulation needs its own independent board-history record
            for _ in range(depth):  # first walk `depth` plies guided by the strong networks
                nodes=mc_tree.getNodeLeaves(node_now)
                if len(nodes)==0:   # position has no children yet: initialise every legal move; no children means either never visited or game over
                    planes=np.vstack((to_plane_color(board_now,player_now),to_ones(board_now),to_zeros(board_now),
                            to_legal_moves(board_now,player_now), turn_since(board_now,not_reset=True,boardhis=boardhis),
                            liberties(board_now),three_in_one(board_now,player_now)))
                    planes=planes.reshape(1,planes.shape[1],planes.shape[2],-1) # reshape to channels-last
                    pred_p=self.model_c.model_predict(planes)[0]
                    pred_p=mask_now*pred_p  # mask out points that are illegal in the position being expanded
                    for idx,item in enumerate(pred_p):
                        if item == 0:   # skip masked-out (illegal) points
                            continue
                        move=np.unravel_index(idx, (boardsize,boardsize))   # move is a plain tuple
                        point=Point(move[0]+1,move[1]+1)
                        if not self.isLegal(point,board_now,player_now):
                            continue    # skip eye-filling / suicide moves
                        mc_tree.addLeaves(node_now,None,item,0,move,player_now)
                nodes=mc_tree.getNodeLeaves(node_now)   # fetch the children again (may have just been created)
                if len(nodes)==0: # still no legal move anywhere: the game is over, score it
                    result=board_now.getGameResult()
                    if result>0:
                        winner=Player.black
                    elif result<0:
                        winner=Player.white
                    else:
                        winner=None
                    update_nodes(node_now,mc_tree.root,winner)
                    break
                    '''
                        #如果在实力计算范围内游戏就结束了，那么就不需要做蒙特卡洛仿真了
                        move=mc_tree.findMove()
                        if move is not None:
                            return Point(move[0]+1,move[1]+1)
                        else:
                            return move
                    '''
                else:
                    node_now=mc_tree.findMax(nodes)
                    move=node_now.move
                    if node_now.q is None:  # the value network is slow, so q is computed lazily on first visit
                        planes=np.vstack((to_plane_color(board_now,player_now),to_ones(board_now),to_zeros(board_now),
                                to_legal_moves(board_now,player_now), turn_since(board_now,not_reset=True,boardhis=boardhis),
                                liberties(board_now),three_in_one(board_now,player_now)))
                        planes=planes.reshape(1,planes.shape[1],planes.shape[2],-1)
                        npmove=np.zeros((boardsize,boardsize))
                        npmove[move]=1 if player_now==Player.black else -1
                        move_value=self.model_v.model_predict({"input1":planes,"input2":npmove.flatten().reshape(1,-1)})[0]
                        node_now.q=move_value
                    point=Point(move[0]+1,move[1]+1)
                    board_now.play_stone(player_now,point)
                    player_now=player_now.other()
                    npdata=board_now.print_board(isprint=False)
                    mask_now=(npdata==0).astype(int).flatten()

            node_mark=node_now  # remember the node reached by the tree walk for the later back-propagation
            for k in range(m_rounds):
                player_now_=copy.deepcopy(player_now)
                board_now_=copy.deepcopy(board)
                boardhis_=copy.deepcopy(board_his)
                mask_now_=copy.deepcopy(mask_now)
                isOver=False
                for _ in range(s_depth):
                    '''
                    开始随机仿真，随机仿真可以使用上面一样的方法（复杂），也可以使用策略网络按概率取值（简单）。
                    这里选择使用简单的方法，因为越远离初始节点的子节点利用率越低，在仿真局数不大的时候，用复杂网络的意义不大。
                    这里的仿真我们不设置全局同形的限制，所以要限制仿真深度，以避免全局同形，陷入无限循环之中
                    '''
                    # Fast rollout: sample moves from the simple policy network's
                    # distribution; depth is capped to avoid positional-repetition loops.
                    planes=np.vstack((to_plane_color(board_now_,player_now_),to_ones(board_now_),to_zeros(board_now_),
                            to_legal_moves(board_now_,player_now_), turn_since(board_now_,not_reset=True,boardhis=boardhis_),
                            liberties(board_now_),three_in_one(board_now_,player_now_)))
                    planes=planes.reshape(1,planes.shape[1],planes.shape[2],-1)
                    pred_p=self.model_s.model_predict(planes)[0]
                    while True:
                        if mask_now_.sum()==0:   # nowhere left to play: the game ends, go score it
                            isOver=True
                            break
                        pred_valid=mask_now_*pred_p
                        pick=random.choices(range(boardsize*boardsize),weights=pred_valid.flatten())
                        move=np.unravel_index(pick[0], (boardsize,boardsize))
                        #move=np.unravel_index(pred_p.argmax(), (boardsize,boardsize))
                        point=Point(move[0]+1,move[1]+1)
                        if self.isLegal(point,board_now_,player_now_):
                            break
                        else:
                            mask_now_[pick[0]]=False
                            continue
                    if isOver:
                        break
                    board_now_.play_stone(player_now_,point)
                    player_now_=player_now_.other()
                    npdata=board_now_.print_board(isprint=False)
                    mask_now_=(npdata==0).astype(int).flatten()
                # Scoring: only a naturally finished game yields a winner; a
                # depth-capped rollout is treated as a draw (winner None).
                if isOver:
                    result=board_now_.getGameResult()
                    if result>0:
                        winner=Player.black
                    elif result<0:
                        winner=Player.white
                    else:
                        winner=None
                else:
                    winner=None
                update_nodes(node_mark,mc_tree.root,winner)
        move=mc_tree.findMove()
        if move is not None:
            return Point(move[0]+1,move[1]+1)
        else:
            return move




    def predict(self,player,board,reChoose=False,isRandom=False,depth=10,rounds=1000):
        """Pick a move for `player` via MCTS.

        reChoose=True excludes moves already returned earlier this turn
        (tracked in self.moves); otherwise the exclusion set is reset.
        Returns a Move, which is a pass when nothing is playable.
        """
        if not reChoose:
            self.moves=frozenset()
        boardSize=self.boardSize
        npdata=board.print_board(isprint=False)
        mask=(npdata==0).astype(int)
        for i in self.moves:
            mask[i]=False
        if mask.sum()==0:   # nowhere left to play
            return Move(is_pass=True)
        point=self.dl_mc(player,board,mask,depth=depth,rounds=rounds)
        if point is not None:
            move=(point.row-1,point.col-1)
            self.moves=self.moves|{move}
            move=Move(point=point)
        else:
            move=Move(is_pass=True)
        return move

if __name__ == "__main__":
    # Self-play training driver on a tiny 4x4 board.
    board = Board(size=4)
    game = Game(board)
    weight_files = (
        "./weights/ai_weights_s.h5",
        "./weights/ai_weights_c.h5",
        "./weights/ai_weights_v.h5",
    )
    bot1 = ai_bot(boardSize=4)
    bot2 = ai_bot(boardSize=4)
    # Only load weights when all three files are present on disk.
    if all(pathlib.Path(w).exists() for w in weight_files):
        for bot in (bot1, bot2):
            bot.load_weights(*weight_files)
    result = game.run_alpha_go_train(play_b=bot1, play_w=bot2, isprint=True, depth=3, rounds=3)
    print(result)
