from __future__ import print_function
import torch
import sys
sys.path.append('../')
from utility.hdf5 import HDF5
from board_fast import Board,Game,Player,Point,Move
from sgf_parser.sgf_parser_to_game import sgf_parser
import os
from pathlib import Path
import numpy as np
import random
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import time
from scipy.stats import binom_test

'''
filePath="./game_recorders/game_recorders.h5"
games=HDF5(filePath,mode='r')
dataGenerator=games.yeilds_data
trainloader = torch.utils.data.DataLoader(dataGenerator, batch_size=4)
dataiter = iter(trainloader)
images, labels = dataiter.next()
print(images, labels)

for i, data in enumerate(trainloader, 0):
    inputs, labels = data
    print(inputs,labels)
'''

# Checkpoint paths: `model_current` holds the most recently trained weights;
# `model_old` holds the previous "champion" weights that the current model
# must beat (see the promotion logic in __main__).
model_current='./model/dqn_current.pt'
model_old='./model/dqn_old.pt'

class TrainRobot():
    """Go-playing bot used during self-play training.

    Moves are chosen either uniformly at random among playable points, or by
    scoring each legal point with the value network and sampling
    proportionally to the scores.
    """

    def __init__(self, net=True, boardSize=9, rand=False, load=None):
        """Create a bot.

        net: if True, construct a fresh Net; otherwise run without a network.
        boardSize: side length of the (square) board.
        rand: default move-selection mode (True = uniform random).
        load: 'model_current' or 'model_old' to restore saved weights;
            any other value leaves the fresh network untouched.
        """
        self.net = Net() if net else None
        if net and load is not None:
            if load == 'model_current':
                self.net.load_state_dict(torch.load(model_current))
            elif load == 'model_old':
                self.net.load_state_dict(torch.load(model_old))
        self.boardSize = boardSize
        # (row, col) points already proposed this turn; kept so that a
        # re-choose (caller rejected the previous proposal) never repeats.
        self.moves = frozenset()
        self.rand = rand

    def reset(self):
        """Forget all proposed moves (call at the start of a new game)."""
        self.moves = frozenset()

    def isPolicyLegal(self, point, board, player):
        """Return False when *point* is an eye of *player*, True otherwise.

        Filling one's own eyes is never useful, so such moves are filtered
        out. A point counts as an eye when every neighbour holds one of
        *player*'s stones with more than one liberty.
        """
        for neighbour in board.get_neighbors(point):
            stone = board.grid.get(neighbour)
            if stone is None:
                return True   # empty neighbour -> not an eye
            if stone.player != player:
                return True   # enemy stone adjacent -> not an eye
            if len(stone.liberties) <= 1:
                return True   # own stone in atari -> not a safe eye
        return False          # fully surrounded by own safe stones: an eye

    def getLegalMoves(self, board, player):
        """Return a frozenset of 0-based (row, col) pairs that are empty,
        pass the eye filter, and have not been proposed this turn."""
        npboard = board.print_board(isprint=False)
        moves = frozenset()
        for idx in np.argwhere(npboard == 0):
            point = Point(idx[0] + 1, idx[1] + 1)
            if self.isPolicyLegal(point, board, player):
                moves = moves | {(idx[0], idx[1])}
        return moves - self.moves

    def predict(self, player, board, reChoose=False, isRandom=None):
        """Pick the next move for *player* on *board*.

        reChoose: True when the caller rejected the previous proposal and
            wants a different one (self.moves is kept so the same point is
            not proposed twice).
        isRandom: truthy forces random selection for this call.
        Returns a Move (a pass when nothing playable remains).
        """
        if not reChoose:
            self.moves = frozenset()
        boardSize = self.boardSize
        rand = True if isRandom else self.rand
        # 1% of the time force a random move: a small exploration
        # perturbation to avoid getting stuck in a local optimum.
        if not np.random.randint(100):
            rand = True
        if rand:
            npdata = board.print_board(isprint=False)
            mask = (npdata == 0).astype(int)
            for tried in self.moves:
                mask[tried] = False
            if mask.sum() == 0:
                return Move(is_pass=True)
            [x, y] = np.where(mask == True)
            candidates = list(zip(x, y))
            random.shuffle(candidates)
            point = None
            for cand in candidates:
                point_ = Point(cand[0] + 1, cand[1] + 1)
                self.moves = self.moves | {cand}
                if self.isPolicyLegal(point_, board, player):
                    point = point_
                    break
            if point is not None:
                return Move(point=point)
            return Move(is_pass=True)
        # Network-based selection.
        if not reChoose:
            self.moves = frozenset()
        boardSize = int(np.sqrt(board.height * board.width))
        moves = self.getLegalMoves(board, player)
        if len(moves) == 0:
            return Move(is_pass=True)
        # Board and player encodings are identical for every candidate, so
        # build them once outside the loop (the original rebuilt them per move).
        npboard = board.print_board(isprint=False)
        npboard = np.array(npboard, dtype=np.float32).reshape(1, 1, boardSize, boardSize)
        npplayer = np.array([1] if player == Player.black else [-1],
                            dtype=np.float32).reshape(1, 1)
        choices = []
        for mv in moves:
            npout = np.zeros((boardSize, boardSize))
            npout[mv] = 1
            npout = np.array(npout.flatten(), dtype=np.float32).reshape(1, -1)
            with torch.no_grad():
                output = self.net(npboard, npplayer, npout)
            choices.append(output.numpy().flatten())
        choices = np.array(choices).flatten()
        # Sample proportionally to the network scores instead of argmax.
        move_idx = random.choices(range(choices.size), weights=choices)
        moves = list(moves)
        move = moves[move_idx[0]]
        self.moves = self.moves | {move}
        point = Point(move[0] + 1, move[1] + 1)
        return Move(point=point)

class PD_Object():
    """Runs head-to-head evaluation games between two bots."""

    def make_samples(self, rounds, bot1, bot2):
        """Play *rounds* games between bot1 and bot2 on a fresh 9x9 board.

        Colours are assigned by a fair coin flip each game so neither bot
        gets a systematic first-move advantage.
        Returns (bot1 wins, bot2 wins).
        """
        wins1 = 0
        wins2 = 0
        board = Board(size=9)
        game = Game(board)
        for _ in range(rounds):
            bot1.reset()
            bot2.reset()
            board.reset()
            game.reset(board)
            # Coin flip: heads -> bot1 takes black.
            if np.random.randint(2) == 0:
                black, white = bot1, bot2
            else:
                black, white = bot2, bot1
            outcome = game.run_train(play_b=black, play_w=white, isprint=False)
            winner = white if outcome == 'GameResult.wWin' else black
            if winner is bot1:
                wins1 += 1
            else:
                wins2 += 1
        return wins1, wins2

def play_against_the_other(dq, bot1, bot2, loops):
    """Thin wrapper: run *loops* evaluation games via dq.make_samples and
    return the (bot1 wins, bot2 wins) pair unchanged."""
    result = dq.make_samples(loops, bot1, bot2)
    return result

def make_tran_data(games_doc, train_file, file_num=0):
    """Convert every SGF file under *games_doc* into training samples in
    *train_file* (HDF5), deleting each SGF once handled.

    file_num: when non-zero (parallel producers are still writing games),
        poll the directory until at least file_num files are present
        before starting. 0 disables the wait.
    """
    file_lists = list_all_file(games_doc)
    # Re-scan on every poll: the original listed the directory once and
    # could therefore sleep forever on a stale listing.
    while file_num and len(file_lists) < file_num:
        time.sleep(1)
        file_lists = list_all_file(games_doc)
    for sgf_file in file_lists:
        try:
            one_sgf = sgf_parser(sgf_file, h5file=train_file)
            one_sgf.setSamples()
            one_sgf.closeParser()
        except Exception:
            # Duplicate file names can make the parser fail; skipping the
            # offending file is safe (deliberate best-effort).
            print("error,filename:", sgf_file)
        os.remove(sgf_file)  # processed (or failed) -> delete

def list_all_file(fPath):
    """Return the plain files directly inside *fPath* as Path objects
    (subdirectories are skipped; no recursion)."""
    entries = [Path(fPath + '/' + name) for name in os.listdir(fPath)]
    return [entry for entry in entries if entry.is_file()]

class Net(nn.Module):
    """Value network scoring a (board, player, candidate-move) triple.

    All three forward inputs arrive as float32 numpy arrays and are
    converted to tensors inside forward().
    """

    def __init__(self):
        super(Net, self).__init__()
        # Three valid 2x2 convolutions shrink the 9x9 board: 9->8->7->6.
        self.conv1 = nn.Conv2d(1, 81, 2)
        self.conv2 = nn.Conv2d(81, 64, 2)
        self.conv3 = nn.Conv2d(64, 32, 2)
        # Fully-connected head over conv features (32*6*6) concatenated
        # with the flattened one-hot move (9*9) and the player flag (1).
        self.fc1 = nn.Linear(32 * 6 * 6 + 9 * 9 + 1, 1024 * 4)
        self.fc2 = nn.Linear(1024 * 4, 512 * 6)
        self.fc3 = nn.Linear(512 * 6, 512 * 2)
        self.fc4 = nn.Linear(512 * 2, 1)

    def forward(self, x1, x2, x3):
        """x1: board, shape (N,1,9,9); x2: player flag, shape (N,1);
        x3: one-hot move, shape (N,81). Returns a sigmoid score (N,1)."""
        board = torch.tanh(self.conv1(torch.from_numpy(x1)))
        board = torch.tanh(self.conv2(board))
        board = torch.tanh(self.conv3(board))
        board = board.view(-1, self.num_flat_features(board))
        features = torch.cat(
            (board, torch.from_numpy(x2), torch.from_numpy(x3)), -1)
        out = torch.tanh(self.fc1(features))
        out = F.relu(self.fc2(out))
        out = F.relu(self.fc3(out))
        return torch.sigmoid(self.fc4(out))

    def num_flat_features(self, x):
        """Number of features per sample: product of all non-batch dims."""
        count = 1
        for dim in x.size()[1:]:
            count *= dim
        return count


if __name__ == '__main__':
    # NOTE(review): device is computed but never used below — kept for
    # backward compatibility, consider wiring it into the net/optimizer.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    dq = PD_Object()
    games_doc = './lr_doc/'
    data_file = './dl_p/dq_train_file.h5'
    model_file = './model/dqn.pt'
    bot1 = TrainRobot(net=True)
    bot2 = TrainRobot(net=True)
    optimizer = optim.SGD(bot1.net.parameters(), lr=0.002)
    criterion = nn.MSELoss()

    for epoch in range(5000):
        print(epoch)
        start = time.perf_counter()
        # Evaluate: 20 self-play games between current (bot1) and old (bot2).
        bot1_win, bot2_win = play_against_the_other(dq, bot1, bot2, 20)
        total = bot1_win + bot2_win
        # Promote bot1's weights only when it is statistically
        # significantly stronger (binomial test, p < .05, win rate > 50%).
        if binom_test(bot1_win, total, 0.5) < .05 and bot1_win / total > .5:
            torch.save(bot1.net.state_dict(), model_old)
            bot2.net.load_state_dict(torch.load(model_old))
        print("1:", time.perf_counter() - start)
        start = time.perf_counter()
        make_tran_data(games_doc, data_file)
        print("2:", time.perf_counter() - start)
        start = time.perf_counter()
        games = HDF5(data_file, mode='r')
        x_, y_ = games.get_dl_dset()
        train_size = y_.shape[0]
        games.closeH5()
        os.remove(data_file)
        print("3:", time.perf_counter() - start)
        start = time.perf_counter()
        x_train_1 = x_[:, :-2]   # flattened board positions
        x_train_2 = x_[:, -2]    # player to move (presumably +1/-1 — confirm against HDF5 writer)
        x_train_3 = y_           # one-hot chosen move
        winner = x_[:, -1]
        # Target is 1 when the mover ended up winning the game, else 0.
        # Vectorized equivalent of the original element-wise loop.
        y_train = np.array((x_train_2 == winner).reshape(-1, 1), dtype=np.float32)
        y_train = torch.from_numpy(y_train)
        boardsize = int(np.sqrt(x_train_1.shape[1]))
        x_train_1 = np.array(x_train_1.reshape(-1, 1, boardsize, boardsize), dtype=np.float32)
        x_train_2 = np.array(x_train_2.reshape(-1, 1), dtype=np.float32)
        x_train_3 = np.array(x_train_3.reshape(-1, x_train_3.shape[1]), dtype=np.float32)
        print("4:", time.perf_counter() - start)
        start = time.perf_counter()
        losscount = 0.0
        indexes = list(range(train_size))
        random.shuffle(indexes)
        for idx in indexes:
            optimizer.zero_grad()
            output = bot1.net(x_train_1[idx:idx + 1], x_train_2[idx:idx + 1], x_train_3[idx:idx + 1])
            loss = criterion(output, y_train[idx:idx + 1])
            # .item() detaches the scalar; the original accumulated live
            # tensors, keeping autograd history referenced across steps.
            losscount += loss.item()
            loss.backward()
            optimizer.step()
        print('loss:', losscount / train_size)
        print("5:", time.perf_counter() - start)
        torch.save(bot1.net.state_dict(), model_current)
    torch.save(bot1.net.state_dict(), model_file)
