import sys
from os.path import dirname, abspath
RootDir = dirname(dirname(abspath(__file__)))
sys.path.append(RootDir)

from weiqi.data.paraller_processor import GoDataProcessor
from weiqi.dlgo.encoder import AlphaGoEncoder
from weiqi.dlgo.agent import DeepLearningAgent
from weiqi.dlgo.networks.alphago import AlphaGoModel

from keras.callbacks import ModelCheckpoint
import h5py

def run_alphago_sl():
    """Train the AlphaGo supervised-learning policy network on human game records.

    Loads train/test data via ``GoDataProcessor`` generators, trains the policy
    network with SGD + categorical cross-entropy, serializes the resulting
    ``DeepLearningAgent`` to ``alphago_sl_policy.h5``, and finally evaluates
    the trained network on the test set.

    Side effects: writes per-epoch checkpoints ``alphago_sl_policy_{epoch}.h5``
    and the final agent file ``alphago_sl_policy.h5`` to the working directory.
    """
    rows, cols = 19, 19
    num_classes = rows * cols  # one output class per board intersection
    num_games = 100  # fixed: was misspelled 'num_ganes'

    encoder = AlphaGoEncoder()
    processor = GoDataProcessor(encoder_name=encoder.name)
    generator = processor.LoadData('train', num_games, use_generator=True)
    test_generator = processor.LoadData('test', num_games, use_generator=True)

    input_shape = (encoder.num_planes, rows, cols)
    alphago_sl_policy = AlphaGoModel(input_shape, is_policy_net=True)
    alphago_sl_policy.compile('sgd', 'categorical_crossentropy', metrics=['accuracy'])

    epochs = 200
    batch_size = 128
    # Keras requires integer step counts; true division `/` yields floats,
    # which newer Keras versions reject. Guard against zero steps for tiny
    # sample counts with max(1, ...).
    train_steps = max(1, generator.GetNumberSamples() // batch_size)
    test_steps = max(1, test_generator.GetNumberSamples() // batch_size)
    alphago_sl_policy.fit_generator(
        generator=generator.Generate(batch_size, num_classes),
        epochs=epochs,
        steps_per_epoch=train_steps,
        validation_data=test_generator.Generate(batch_size, num_classes),
        validation_steps=test_steps,
        callbacks=[ModelCheckpoint('alphago_sl_policy_{epoch}.h5')]
    )

    # Wrap the trained policy net in an agent and persist it to HDF5.
    alphago_agent = DeepLearningAgent(alphago_sl_policy, encoder)
    with h5py.File('alphago_sl_policy.h5', 'w') as f:
        alphago_agent.Serialize(f)

    # Report test-set performance; previously the result was discarded.
    evaluation = alphago_sl_policy.evaluate_generator(
        generator=test_generator.Generate(batch_size, num_classes),
        steps=test_steps
    )
    print('Test evaluation (loss, accuracy):', evaluation)

# Script entry point: kick off supervised-learning training when run directly.
if __name__ == '__main__':
    run_alphago_sl()

