
from DQN import DQN
import tensorflow as tf
import numpy as np
import random
from game_env import Game,Board
#https://deepmind.com/blog/alphago-zero-learning-scratch/
from mcts_pure import MCTSPlayer as MCTS_Pure
from game_style import gamestyle

# Hyper Parameters
# NOTE(review): ENV_NAME looks like a leftover from a CartPole example and
# appears unused in this chunk — confirm before removing.
ENV_NAME = 'CartPole-v0'
EPISODE = 10000 # Episode limitation
#STEP = 300 # Step limitation in an episode

# Render the board after each move (only for the first 100 episodes; see main()).
is_shown =True

# When True, resume training from './current_policy.model' instead of starting fresh.
continueTrain=False

def main():
    """Train a DQN agent (player 1) by playing full games against a
    pure-MCTS opponent (player 2) on the board from game_env.

    Each move is stored in the agent's replay buffer via agent.perceive();
    terminal moves carry the win/loss reward plus the winner id so the
    agent can propagate the outcome back over the whole game (indexed
    from new_game_idx).

    NOTE(review): behavior of the project-local modules (DQN, Game/Board,
    MCTS_Pure, gamestyle) is assumed from their usage here.
    """
    mboard = Board()
    mboard.init_board()
    env = Game(mboard)
    # A game can last at most one move per board cell.
    STEP = mboard.width * mboard.height

    # Optionally resume training from a previously saved model.
    model_file = './current_policy.model' if continueTrain else None
    agent = DQN(model_file=model_file)
    pure_mcts_player = MCTS_Pure(c_puct=5, n_playout=1000)

    for episode in range(EPISODE):
        # Reset the board for a new game.
        mboard.init_board()
        print('episode:', episode)
        p1, p2 = mboard.players  # e.g. [1, 2]
        # Index of this game's first transition in the replay buffer, so
        # the terminal reward can be applied to the whole game's moves.
        new_game_idx = len(agent.replay_buffer)
        for step in range(STEP):
            current_state = mboard.current_state()
            current_player = mboard.current_player
            current_availables = mboard.availables
            if current_player == 1:
                # e-greedy action from the Q-network (the learning agent).
                action = agent.DQN_ACTION(current_state, mboard.availables)
            else:
                action = pure_mcts_player.get_action(mboard)
            mboard.do_move(action)
            next_state = mboard.current_state()
            if is_shown and episode < 100:
                env.graphic(mboard, p1, p2)

            end, winner = mboard.game_end()
            if end:
                # current_player is the player who just moved; reward is
                # from that player's perspective.
                # NOTE(review): a draw (winner equal to neither player)
                # falls into the losing branch and is penalized with
                # -0.99 — confirm this is intended.
                reward = 0.99 if current_player == winner else -0.99
                if current_player == 1 and current_player == winner:
                    print('Q network win at episode :', episode)
                agent.perceive(current_state, action, reward, next_state, end, new_game_idx, winner)
                break

            # Intermediate shaping reward from the heuristic evaluator;
            # clamp non-positive values to a tiny positive epsilon so every
            # stored transition keeps a strictly positive reward.
            reward, _fx = gamestyle(action, mboard, current_player, current_availables)
            if reward <= 0:
                reward = 0.000001

            agent.perceive(current_state, action, reward, next_state, end, new_game_idx, 0)







# Run training only when executed as a script, not on import.
if __name__ == '__main__':
    main()