import numpy as np 
from keras.optimizers import SGD
from keras.models import Sequential, Model

from ..dlgo.agent import Agent
from ..dlgo.board import Move, GameState
from .encoder import ZeroEncoder
from .experience import ZeroExperienceBuffer, ZeroExperienceCollector

class Branch:
    """Search statistics for a single candidate move at a tree node."""

    def __init__(self, prior) -> None:
        self.prior = prior  # policy-network prior probability for the move
        # MCTS accumulators: times traversed, and sum of backed-up values.
        self.visit_count, self.total_value = 0, 0.0

class ZeroTreeNode:
    """One node of the AlphaZero-style search tree.

    Holds the game state it represents, the value estimate the network
    produced when the node was expanded, one ``Branch`` of statistics per
    legal move, and links to the parent and expanded children.
    """

    def __init__(self, state: "GameState", value, priors, parent, last_move) -> None:
        self.state = state
        self.value = value          # network value estimate for `state`
        self.parent = parent        # None for the root node
        self.last_move = last_move  # move that led from parent to this node

        # Starts at 1 so the sqrt(total) exploration term is nonzero on
        # the very first SelectBranch call.
        self.total_visit_count = 1
        self.branches: dict["Move", Branch] = {}
        for move, p in priors.items():
            # Only legal moves get a branch; priors on illegal moves are dropped.
            if state.IsValidMove(move):
                self.branches[move] = Branch(p)
        self.children = {}  # move -> ZeroTreeNode

    def Moves(self):
        """Return the legal moves tracked at this node."""
        return self.branches.keys()

    def AddChild(self, move, child_node):
        """Attach an expanded child node under `move`."""
        self.children[move] = child_node

    def HasChild(self, move):
        """Return True if `move` has already been expanded into a child."""
        return move in self.children

    def GetChild(self, move):
        """Return the child node reached by `move` (KeyError if absent)."""
        return self.children[move]

    def RecordVisit(self, move: "Move", value):
        """Back up one simulation: bump the counters and add `value` for `move`.

        Resignation is never tracked as a branch; an unknown move is logged
        and ignored rather than crashing the search.
        """
        if move == Move.Resign():
            return
        if self.branches.get(move) is None:
            # Bug fix: diagnostic previously misspelled the method name
            # ("RecordVisist").
            print(f'Error in RecordVisit(): move == {move}')
            return
        self.total_visit_count += 1
        self.branches[move].visit_count += 1
        self.branches[move].total_value += value

    def ExceptedValue(self, move):
        """Average backed-up value (Q) for `move`; 0.0 before any visit.

        NOTE(review): the name is a typo for "ExpectedValue"; kept as-is
        because ZeroAgent.SelectBranch calls it by this name.
        """
        branch = self.branches[move]
        if branch.visit_count == 0:
            return 0.0
        return branch.total_value / branch.visit_count

    def Prior(self, move):
        """Policy-network prior probability for `move`."""
        return self.branches[move].prior

    def VisitCount(self, move):
        """Visit count for `move`; 0 for moves without a branch."""
        if move in self.branches:
            return self.branches[move].visit_count
        return 0

class ZeroAgent(Agent):
    """AlphaGo Zero style agent: MCTS guided by one policy+value network.

    Each SelectMove call runs `rounds_per_move` simulations from the
    current position and plays the most-visited root move.
    """

    def __init__(self, model:Model, encoder: ZeroEncoder, rounds_per_move=1600, c=2.0) -> None:
        super().__init__()
        self.model = model      # Keras model: state tensor -> ([priors], [[value]])
        self.encoder = encoder  # converts GameState to tensors, move index <-> Move
        self.num_rounds = rounds_per_move  # MCTS simulations per move
        self.c = c  # exploration constant in the PUCT score

        # Optional self-play experience collector; set via SetCollector().
        self.collector: "ZeroExperienceCollector | None" = None 

    def SelectMove(self, game_state: GameState):
        """Run MCTS from `game_state` and return the most-visited root move.

        If a collector is attached, also records the encoded root state and
        per-move visit counts as a training example.
        """
        root = self.CreateNode(game_state)
        for i in range(self.num_rounds):
            # Selection: follow the best PUCT branch down the tree until
            # reaching a branch that has not been expanded yet.
            node = root
            next_move = self.SelectBranch(node)
            while node.HasChild(next_move):
                node = node.GetChild(next_move)
                next_move = self.SelectBranch(node)
            # Expansion: evaluate the new position with the network.
            new_state = node.state.ApplyMove(next_move)
            child_node = self.CreateNode(new_state, move=next_move, parent=node)
            # Backup: propagate the value to the root, flipping the sign
            # each ply because the network value is from the perspective
            # of the player to move at that node.
            move = next_move
            value = -1 * child_node.value
            while node is not None:
                node.RecordVisit(move, value)
                move = node.last_move
                node = node.parent
                value = -1 * value 
        if self.collector is not None:
            # Visit counts over the full move-index space (unvisited or
            # illegal moves contribute 0) become the policy target.
            root_state_tensor = self.encoder.Encode(game_state)
            visit_counts = np.array([
                root.VisitCount(self.encoder.DecodeMove(index)) 
                for index in range(self.encoder.NumberMoves())
            ])
            self.collector.RecordDecision(root_state_tensor, visit_counts)

        # Play the move the search visited most often.
        return max(root.Moves(), key=root.VisitCount)
    
    def SetCollector(self, collector):
        """Attach an experience collector used by SelectMove."""
        self.collector = collector

    def SelectBranch(self, node:ZeroTreeNode):
        """Return the move maximizing the PUCT score Q + c*P*sqrt(N)/(n+1)."""
        total = node.total_visit_count
        def score_branch(move):
            q = node.ExceptedValue(move)   # mean backed-up value (Q)
            p = node.Prior(move)           # network prior (P)
            n = node.VisitCount(move)      # branch visits (n)
            return q + self.c * p * np.sqrt(total) / (n+1)
        
        if len(node.Moves()) == 0: 
            # No legal branch at this node; fall back to passing.
            print('Error in SelectBranch(), node.Moves() == 0')
            return Move.Pass()
        return max(node.Moves(), key=score_branch)
    
    def CreateNode(self, game_state:GameState, move:Move=None, parent: ZeroTreeNode=None) -> ZeroTreeNode:
        """Evaluate `game_state` with the network and wrap it in a new tree node.

        If `parent` is given, the node is linked in as its child via `move`.
        NOTE(review): one model.predict call per expanded node — assumes the
        model outputs ([priors], [[value]]); confirm against the network head.
        """
        state_tensor = self.encoder.Encode(game_state)
        model_input = np.array([state_tensor])
        priors, values = self.model.predict(model_input)
        priors = priors[0]
        value = values[0][0]
        # Map each flat policy index back to its Move for the node's branches.
        move_priors = {
            self.encoder.DecodeMove(i) : p 
            for i,p in enumerate(priors)
        }
        new_node = ZeroTreeNode(game_state, value, move_priors, parent, move)
        if parent is not None:
            parent.AddChild(move, new_node)
        return new_node

    def Train(self, experience:ZeroExperienceBuffer, learning_rate, batch_size):
        """Fit the network on collected self-play experience.

        Policy target: visit counts normalized to a distribution per example.
        Value target: the recorded game outcome (reward).
        NOTE(review): compile() runs on every call, resetting optimizer state.
        """
        num_examples = experience.states.shape[0]
        model_input = experience.states
        visit_sums = np.sum(experience.visit_counts, axis=1).reshape((num_examples, 1))
        action_target = experience.visit_counts / visit_sums
        value_target = experience.rewards
        self.model.compile(SGD(learning_rate=learning_rate),
                           loss=['categorical_crossentropy', 'mse'])
        self.model.fit(model_input, [action_target, value_target],
                       batch_size=batch_size)