import sys
sys.path.append('../')
from board_fast import Board,Game,Player,Point,Move
from tensorflow import keras
import numpy as np
from scipy.stats import binom_test
from utility.keras_modal import DenseModel
import random
from scipy.stats import binom_test
import os
from pathlib import Path
from utility.hdf5 import HDF5

class TrainRobot():
    """Self-play Go robot backed by an actor-critic (policy + value) network.

    The robot encodes the board plus the colour to move as a flat vector,
    queries the network for a move distribution and a position value, and
    samples a legal move from that distribution (with a small exploration
    chance of picking a uniformly random empty point).
    """
    def __init__(self,net='ac',boardSize=9,rand=False):
        # `net` selects the model variant inside DenseModel (default actor-critic).
        self.model=DenseModel(boardSize=boardSize,model=net)
        self.boardSize=boardSize
        # Board coordinates already tried this turn; they are masked out so
        # the same rejected point is not re-sampled.  {(row,col)}
        self.moves=frozenset()
        self.rand=rand
    def compile(self):
        """Compile the actor-critic model (must be called before fit)."""
        self.model.compile_ac()
    def fit(self,x_train,y_train):
        """Train on recorded games; the last column of y_train is the value
        head target, the remaining columns are the policy head targets."""
        y_train_p=y_train[:,:-1]
        y_train_v=y_train[:,-1:]
        self.model.fit_ac(x_train,y_train_p,y_train_v)
    def reset(self):
        """Clear the per-turn rejected-move set before a new game."""
        self.moves=frozenset()
    def load_weights(self,fileName):
        # Predicting with trained network weights: disable the random policy.
        self.rand=False
        self.model.model_load_weights(fileName)
    def unload_weights(self,fileName):
        """Save the current network weights to `fileName`."""
        self.model.model_save_weights(fileName)

    def isPolicyLegal(self,point,board,player):
        """Return False when `point` is an eye of `player`'s own groups
        (every neighbour is a stone of `player` with more than one liberty);
        filling an own eye is never useful, so the policy forbids it.
        Any other point is treated as legal by the policy."""
        for neighbour in board.get_neighbors(point):
            stone=board.grid.get(neighbour)
            if stone is None or stone.player != player or len(stone.liberties)<=1:
                return True  # not a completed own eye -> playable
        return False  # all neighbours are own stones with liberties: an eye

    def predict(self,player,board,reChoose=False,isRandom=False):
        """Choose the next move for `player` on `board`.

        Returns (Move, value): Move is either a point move or a pass, value is
        the network's evaluation of the position.  When `reChoose` is True the
        rejected-move set from the previous call is kept so the same point is
        not offered again.
        """
        if not reChoose:
            self.moves=frozenset()
        boardSize=self.boardSize
        npdata=board.print_board(isprint=False)
        # 1 where the board is empty; occupied points are masked so the
        # network never places a stone on an existing one.
        mask=(npdata==0).astype(int)
        for rejected in self.moves:
            mask[rejected]=0
        if player == Player.black or player == 1:
            color=np.ones((1,1)).flatten()
        else:
            color=-1*np.ones((1,1)).flatten()
        npdata=npdata.flatten()
        npdata=np.hstack((npdata,color)).reshape(1,self.boardSize**2+1).astype(int)
        pred,value=self.model.model_predict_ac(npdata)
        if mask.sum()==0:
            return Move(is_pass=True),value
        # 2% chance (or on request) of a uniformly random empty point: a cheap
        # exploration strategy that guards against local optima.
        if not np.random.randint(50) or isRandom==True:
            [x,y]=np.where(mask==1)
            candidates=list(zip(x,y))
            random.shuffle(candidates)
            point=None
            for cand in candidates:
                candidate_point=Point(cand[0]+1,cand[1]+1)
                self.moves=self.moves|{cand}
                if self.isPolicyLegal(candidate_point,board,player):
                    point=candidate_point
                    break
            if point is not None:
                pmove=Move(point=point)
            else:
                pmove=Move(is_pass=True)
            return pmove,value
        while True:
            mask_=mask.flatten()
            pred_valid=mask_*pred
            weights=pred_valid.flatten()
            if weights.sum()<=0:
                # The network put zero probability on every remaining legal
                # point; random.choices raises on an all-zero weight vector,
                # so fall back to a uniform draw over the legal points.
                weights=mask_.astype(float)
            # Sample proportionally to the network output, not argmax.
            pick=random.choices(range(boardSize*boardSize),weights=weights)
            move=np.unravel_index(pick[0], (boardSize,boardSize))
            self.moves=self.moves|{move}
            point=Point(move[0]+1,move[1]+1)
            if self.isPolicyLegal(point,board,player):
                pmove=Move(point=point)
                break
            # Illegal choice: mask it out and resample; pass when nothing is left.
            mask[move]=0
            if mask.sum()==0:
                pmove=Move(is_pass=True)
                break
        return pmove,value

class PD_Object():
    """Arena that pits two robots against each other and records game samples."""
    def make_samples(self,rounds,bot1,bot2,hd5file):
        """Play `rounds` games between bot1 and bot2, appending the game
        records to `hd5file`.  Colours are assigned at random each game.
        Returns the pair (bot1 wins, bot2 wins)."""
        score={'bot1':0,'bot2':0}
        board=Board(size=9)
        game=Game(board)
        for game_idx in range(rounds):
            print(game_idx)
            bot1.reset()
            bot2.reset()
            board.reset()
            game.reset(board)
            # 50% chance bot1 plays black, 50% white.
            bot1_plays_black=np.random.randint(2)==0
            if bot1_plays_black:
                black,white=bot1,bot2
            else:
                black,white=bot2,bot1
            result=game.run_ac_train(hd5file,play_b=black,play_w=white,isprint=False)
            white_won=result=='GameResult.wWin'
            # bot1 wins when its colour matches the winning side.
            if bot1_plays_black != white_won:
                score['bot1']+=1
            else:
                score['bot2']+=1
        return score['bot1'],score['bot2']

def play_against_the_other(pd,bot1,bot2,loops,hd5file):
    """Run `loops` evaluation games via `pd` and return (bot1 wins, bot2 wins).

    bot1 is the robot being trained; bot2 is the baseline that is only
    updated once bot1 shows real progress.
    """
    return pd.make_samples(loops,bot1,bot2,hd5file)

if __name__ == "__main__":
    # Self-play training driver: bot1 trains continuously; bot2 holds the
    # last weights that demonstrated a significant improvement.
    pd=PD_Object()
    hd5file='./trainfile/train_file.h5'
    weights_current='./trainfile/current_weights.h5'   # latest learned weights
    weights_old='./trainfile/old_weights.h5'   # last weights that showed improvement
    bot1=TrainRobot()   # the robot being trained (latest weights)
    bot2=TrainRobot()   # baseline: weights from the last improvement
    # argv[1]==1 / argv[2]==1: resume the corresponding bot from its saved weights.
    if len(sys.argv) > 1 and int(sys.argv[1])==1:
        bot1.load_weights(weights_current)
    if len(sys.argv) > 2 and int(sys.argv[2])==1:
        bot2.load_weights(weights_old)
    bot1.compile()
    for i in range(10):
        bot1_win,bot2_win=play_against_the_other(pd,bot1,bot2,50,hd5file)
        total=bot1_win+bot2_win
        # Promote bot1's weights to the baseline only when it beats bot2 with
        # statistical significance AND wins more than half the games.
        # NOTE(review): scipy.stats.binom_test is deprecated (removed in SciPy
        # 1.12) in favour of binomtest — confirm the pinned SciPy version.
        if binom_test(bot1_win, total, 0.5)<.05 and bot1_win/total>.5:  # bot1 is significantly better
            bot1.unload_weights(weights_old)
            bot2.load_weights(weights_old)
        # Train bot1 on the games just recorded, then delete the record file
        # so the next round starts a fresh HDF5 archive.
        game_records=HDF5(hd5file,mode='r')
        x_train,y_train=game_records.get_ac_dset()
        game_records.closeH5()
        os.remove(hd5file)
        bot1.fit(x_train,y_train)
        bot1.unload_weights(weights_current)
