from os.path import dirname, abspath
import sys 
sys.path.append(dirname(dirname(abspath(__file__))))

from weiqi.zero.agent import ZeroAgent, ZeroEncoder
from weiqi.zero.experience import ZeroExperienceBuffer, ZeroExperienceCollector, CombineExperiences
from weiqi.dlgo.board import GameState, Board, Move 
from weiqi.dlgo.utils import Player
from weiqi.dlgo.score import ComputeGameResult
from keras.layers import Conv2D, Dense, Flatten, Input
from keras.models import Model
import h5py

def simulate_game(board_size, b_agent:ZeroAgent, b_collector:ZeroExperienceCollector, 
                  w_agent:ZeroAgent, w_collector:ZeroExperienceCollector):
    """Play one self-play game and record the outcome in both collectors.

    The two agents alternate moves from a fresh game state until the game
    is over; each collector then receives +1 for a win and -1 for a loss
    (any non-black result is credited to white).
    """
    state = GameState.NewGame(board_size)
    players = {Player.black: b_agent, Player.white: w_agent}
    for collector in (b_collector, w_collector):
        collector.BeginEpisode()
    # Alternate moves until the game ends.
    while not state.IsOver():
        move = players[state.next_player].SelectMove(state)
        state = state.ApplyMove(move)
    outcome = ComputeGameResult(state)
    black_won = outcome.Winner == Player.black
    b_collector.CompleteEpisode(1 if black_won else -1)
    w_collector.CompleteEpisode(-1 if black_won else 1)

def _build_zero_model(encoder, num_filters=64, num_conv_layers=4):
    """Build the dual-headed (policy + value) Keras network for ZeroAgent.

    The network is a shared convolutional trunk feeding two heads:
    a softmax policy over every encodable move, and a tanh scalar value
    estimating the game outcome in [-1, 1].
    """
    board_input = Input(shape=encoder.Shape(), name='board_input')
    pb = board_input
    fmt = 'channels_last'
    # Shared convolutional trunk.
    for _ in range(num_conv_layers):
        pb = Conv2D(num_filters, (3, 3), padding='same',
                    data_format=fmt, activation='relu')(pb)

    # Policy head: probability distribution over all encodable moves.
    policy_conv = Conv2D(2, (1, 1), data_format=fmt, activation='relu')(pb)
    policy_flat = Flatten()(policy_conv)  # fixed misspelled local 'poliy_flat'
    policy_output = Dense(encoder.NumberMoves(), activation='softmax')(policy_flat)

    # Value head: single scalar in [-1, 1].
    value_conv = Conv2D(1, (1, 1), data_format=fmt, activation='relu')(pb)
    value_flat = Flatten()(value_conv)
    value_hidden = Dense(256, activation='relu')(value_flat)
    value_output = Dense(1, activation='tanh')(value_hidden)

    return Model(inputs=[board_input], outputs=[policy_output, value_output])

def run_zero(board_size=19, num_games=2, rounds_per_move=10, c=2.0,
             learning_rate=0.01, batch_size=2048,
             experience_file='zero_agent.h5'):
    """Run a small AlphaZero-style self-play and training loop.

    Builds one shared policy/value network, plays `num_games` self-play
    games between two ZeroAgents backed by that network, trains the model
    on the combined experience, and serializes the experience to
    `experience_file`.

    All former hard-coded hyperparameters are now keyword parameters with
    the original values as defaults, so calling run_zero() is unchanged.
    """
    encoder = ZeroEncoder(board_size)
    model = _build_zero_model(encoder)

    # Both agents share the same model; separate collectors keep the
    # per-color episode rewards apart.
    b_agent = ZeroAgent(model=model, encoder=encoder,
                        rounds_per_move=rounds_per_move, c=c)
    w_agent = ZeroAgent(model=model, encoder=encoder,
                        rounds_per_move=rounds_per_move, c=c)
    b_collector = ZeroExperienceCollector()
    w_collector = ZeroExperienceCollector()
    b_agent.SetCollector(b_collector)
    w_agent.SetCollector(w_collector)

    for _ in range(num_games):
        simulate_game(board_size=board_size,
                      b_agent=b_agent, b_collector=b_collector,
                      w_agent=w_agent, w_collector=w_collector)

    exp = CombineExperiences([b_collector, w_collector])
    b_agent.Train(exp, learning_rate, batch_size)

    # NOTE(review): despite the 'zero_agent' filename, this serializes the
    # *experience buffer*, not the trained agent — confirm that is intended.
    with h5py.File(experience_file, 'w') as f:
        exp.Serialize(f)
        print('OK!')

# Entry point: run the self-play/training loop when executed as a script.
if __name__ == '__main__':
    run_zero()