from collections import namedtuple
from ..board import GameState, Move
from ...utils import R
from ..gotypes import Player
from ..agent import Agent, PolicyAgent, PolicyGradientLoss
from ..score import ComputeGameResult
from .experience import ExperienceCollector, CombineExperience
import h5py
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import SGD

from ..networks.large import Layers
from ..utils import R as DR


class GameRecord(namedtuple('GameRecord', ['moves', 'winner', 'margin'])):
    """Immutable record of one finished game: its move list, the winning
    Player, and the score margin of the win."""

def SimulateGame(b_agent:Agent, w_agent:Agent):
    """Play a single complete game between two agents.

    b_agent takes black, w_agent takes white. Moves are selected
    alternately until the game state reports it is over, then the game
    is scored.

    Returns a GameRecord holding the full move sequence, the winner, and
    the winning margin.
    """
    history = []
    state = GameState.NewGame(R.BoardSize)
    players = {
        Player.black: b_agent,
        Player.white: w_agent,
    }
    while not state.IsOver():
        next_move = players[state.next_player].SelectMove(state)
        history.append(next_move)
        state = state.ApplyMove(next_move)
    outcome = ComputeGameResult(state)
    # Progress/debug output — one line per finished game.
    print(outcome)
    return GameRecord(moves=history, winner=outcome.Winner, margin=outcome.WinnerMargin)

def ExperienceSimulate(num_games, agent1:PolicyAgent, agent2:PolicyAgent):
    """Run num_games self-play games and gather training experience.

    agent1 starts as black and the colors swap after every game so both
    agents see both sides. After each game the winner's collector is
    closed with reward +1 and the loser's with reward -1.

    Returns the combined experience from both agents' collectors.
    """
    collector1 = ExperienceCollector()
    collector2 = ExperienceCollector()
    agent1_color: Player = Player.black
    for _ in range(num_games):
        collector1.BeginEpisode()
        agent1.SetCollector(collector1)
        collector2.BeginEpisode()
        agent2.SetCollector(collector2)
        # Assign colors for this game based on whose turn it is to be black.
        if agent1_color == Player.black:
            black_agent, white_agent = agent1, agent2
        else:
            black_agent, white_agent = agent2, agent1
        record = SimulateGame(black_agent, white_agent)
        agent1_won = record.winner == agent1_color
        collector1.CompleteEpisode(reward=1 if agent1_won else -1)
        collector2.CompleteEpisode(reward=-1 if agent1_won else 1)
        agent1_color = agent1_color.Other
    return CombineExperience([collector1, collector2])

def RunExperience(filename, n, a1, a2):
    """Simulate n self-play games between a1 and a2, then persist the
    collected experience to an HDF5 file at filename (overwriting it)."""
    collected = ExperienceSimulate(n, a1, a2)
    with h5py.File(filename, 'w') as h5file:
        collected.Serialize(h5file)

def GetPolicyAgent():
    """Construct a fresh PolicyAgent.

    Builds a sequential network from the shared 'large' layer stack, adds
    a dense softmax head with one output per board point, and compiles it
    with SGD and the policy-gradient loss.
    """
    board_encoder = DR.GetEncoderByName('simple', R.BoardSize)

    network = Sequential()
    # Body: the shared architecture sized to the encoder's input shape.
    for net_layer in Layers(board_encoder.Shape()):
        network.add(net_layer)
    # Head: a probability distribution over every point on the board.
    network.add(Dense(board_encoder.NumberPoints()))
    network.add(Activation('softmax'))
    network.compile(loss=PolicyGradientLoss, optimizer=SGD(learning_rate=0.02))
    return PolicyAgent(network, board_encoder)

def run_policy():
    """Entry point: self-play 5 games between two freshly built policy
    agents and write the gathered experience to an HDF5 file."""
    # NOTE(review): 'polic_agent.h5' looks like a typo for 'policy_agent.h5';
    # kept as-is because downstream consumers may expect this exact name.
    output_file = 'polic_agent.h5'
    num_games = 5
    first_agent = GetPolicyAgent()
    second_agent = GetPolicyAgent()
    RunExperience(output_file, num_games, first_agent, second_agent)
