import random
from typing import Any
import numpy as np

from .board import GameState, IsPointAnEye, Move
from .gotypes import Point
from .encoder import Encoder, SevenPlaneEncoder, SimpleEncoder
from keras.models import Sequential
from keras.optimizers import SGD
from keras import backend

from h5py import File
from .utils import SaveModelToH5File, LoadModelFromH5File, R as DR
from .score import ComputeGameResult
from .rl.experience import ExperienceCollector

class Agent:
    """Abstract base class for all Go-playing agents."""

    def __init__(self) -> None:
        pass

    def SelectMove(self, game_state: GameState):
        """Choose the next move for the current player. Subclasses must override."""
        raise NotImplementedError

    def Diagnostics(self):
        """Return a dict of agent-specific debug info; empty by default."""
        return {}


class RandmomAgent(Agent):
    """Agent that plays a uniformly random valid move.

    NOTE(review): the class name is misspelled ("Randmom"); it is kept for
    backward compatibility and a correctly spelled alias is defined below.
    """

    def SelectMove(self, game_state: GameState):
        """Pick a random valid move that does not fill one of our own eyes.

        Returns a pass move when no such point exists.
        """
        candidates = []
        for r in range(1, game_state.board.rows + 1):
            for c in range(1, game_state.board.cols + 1):
                candidate = Point(r, c)
                if game_state.IsValidMove(Move.Play(candidate)) and not IsPointAnEye(
                    game_state.board, candidate, game_state.next_player
                ):
                    candidates.append(candidate)
        if not candidates:
            return Move.Pass()
        return Move.Play(random.choice(candidates))


# Correctly spelled alias; prefer this name in new code.
RandomAgent = RandmomAgent


class FastRandomAgent(Agent):
    """Random agent that caches the list of board points between moves."""

    def __init__(self) -> None:
        super().__init__()
        self.dim = None          # (rows, cols) the cache was built for
        self.point_cache = []    # every Point on a board of that size

    def UpdateCache(self, dim):
        """Rebuild the point cache for a board of the given (rows, cols) size."""
        self.dim = dim
        rows, cols = dim
        self.point_cache = [
            Point(row, col)
            for row in range(1, rows + 1)
            for col in range(1, cols + 1)
        ]

    def SelectMove(self, game_state: GameState):
        """Play a random valid non-eye-filling point, or pass if none exists."""
        board_dim = (game_state.board.rows, game_state.board.cols)
        if board_dim != self.dim:
            self.UpdateCache(board_dim)
        order = np.arange(len(self.point_cache))
        np.random.shuffle(order)
        for idx in order:
            candidate = self.point_cache[idx]
            if game_state.IsValidMove(Move.Play(candidate)) and not IsPointAnEye(
                game_state.board, candidate, game_state.next_player
            ):
                return Move.Play(candidate)
        return Move.Pass()


class DeepLearningAgent(Agent):
    """Agent that ranks moves with a trained move-prediction network."""

    def __init__(self, model: Sequential, encoder: SevenPlaneEncoder) -> None:
        super().__init__()
        self.model = model
        self.encoder = encoder

    def Predict(self, game_state: GameState):
        """Return the network's raw move probabilities for the given state."""
        batch = np.array([self.encoder.Encode(game_state)])
        return self.model.predict(batch)[0]

    def SelectMove(self, game_state: GameState):
        """Sample moves by sharpened predicted probability, skipping invalid
        moves and our own eyes; pass when nothing is playable."""
        num_moves = self.encoder.board_width * self.encoder.board_height
        probs = self.Predict(game_state)
        # Cube to sharpen the distribution toward the strongest predictions.
        probs = probs ** 3
        eps = 1e-6
        probs = np.clip(probs, eps, 1 - eps)
        probs = probs / np.sum(probs)
        ranked = np.random.choice(
            np.arange(num_moves), num_moves, replace=False, p=probs
        )
        for move_idx in ranked:
            point = self.encoder.DecodePoint(move_idx)
            if game_state.IsValidMove(Move.Play(point)) and not IsPointAnEye(
                game_state.board, point, game_state.next_player
            ):
                return Move.Play(point)
        return Move.Pass()

    def Serialize(self, h5file: File):
        """Write encoder metadata and model weights into an open HDF5 file."""
        enc_group = h5file.create_group("encoder")
        enc_group.attrs["name"] = self.encoder.name
        enc_group.attrs["board_width"] = self.encoder.board_width
        enc_group.attrs["board_height"] = self.encoder.board_height
        h5file.create_group("model")
        SaveModelToH5File(self.model, h5file["model"])

def LoadPredictionAgent(h5file: File):
    """Reconstruct a DeepLearningAgent from an HDF5 file written by Serialize."""
    model = LoadModelFromH5File(h5file['model'])
    encoder_attrs = h5file['encoder'].attrs
    encoder_name = encoder_attrs['name']
    if not isinstance(encoder_name, str):
        # h5py may hand string attributes back as bytes; normalize to str.
        encoder_name = encoder_name.decode('utf-8')
    dims = (encoder_attrs['board_width'], encoder_attrs['board_height'])
    encoder = DR.GetEncoderByName(encoder_name, dims)
    return DeepLearningAgent(model, encoder)
        
class TerminationAgent(Agent):
    """Wraps another agent and lets a TerminationStrategy pass/resign for it."""

    def __init__(self, agent: Agent, strategy=None) -> None:
        super().__init__()
        self.agent = agent
        # Default strategy never passes and never resigns.
        if strategy is None:
            strategy = TerminationStrategy()
        self.strategy = strategy

    def SelectMove(self, game_state: GameState):
        """Consult the strategy first; otherwise delegate to the wrapped agent."""
        if self.strategy.ShouldPass(game_state):
            return Move.Pass()
        if self.strategy.ShouldResign(game_state):
            return Move.Resign()
        return self.agent.SelectMove(game_state)

def GetPassStrategy(terminate):
    """Map a termination-strategy name to a strategy instance.

    Args:
        terminate: strategy name; only 'opponent_passes' is supported.

    Raises:
        ValueError: if the name is not a supported strategy.
    """
    if terminate == 'opponent_passes':
        return PassWhenOpponentPassesStrategy()
    # Fixed typo in the error message ("surpported" -> "supported").
    raise ValueError(f'Not supported termination strategy {terminate}')
    
class TerminationStrategy:
    """Base termination strategy: never pass, never resign."""

    def __init__(self) -> None:
        pass

    def ShouldPass(self, game_state: GameState):
        """Return True when the agent should pass instead of playing."""
        return False

    def ShouldResign(self, game_state: GameState):
        """Return True when the agent should resign the game."""
        return False
    
class PassWhenOpponentPassesStrategy(TerminationStrategy):
    """Pass as soon as the opponent passes."""

    def ShouldPass(self, game_state: GameState):
        """Return True iff the opponent's last move was a pass.

        Fixed: the original implicitly returned None (instead of False) when
        no move had been played yet; now always returns a bool.
        """
        if game_state.last_move is None:
            return False
        return bool(game_state.last_move.is_pass)
        
class ResignLargeMarginStrategy(TerminationStrategy):
    """Resign once the game passes a cutoff move count and we trail by a large margin."""

    def __init__(self, own_color, cut_off_move, margin) -> None:
        super().__init__()
        self.own_color = own_color        # the color this agent plays
        self.cut_off_move = cut_off_move  # don't consider resigning before this many moves
        self.margin = margin              # resign when losing by at least this much
        self.move_played = 0              # number of times ShouldResign has been consulted

    def ShouldPass(self, game_state: GameState):
        return False

    def ShouldResign(self, game_state: GameState):
        """Resign when past the cutoff and the opponent leads by >= margin.

        Fixed: the original tested `if self.move_played:`, which is truthy
        after the very first move and made `cut_off_move` dead; compare
        against the cutoff instead.
        """
        self.move_played += 1
        if self.move_played >= self.cut_off_move:
            result = ComputeGameResult(game_state)
            if result.Winner != self.own_color and result.WinnerMargin >= self.margin:
                return True
        return False

class PolicyAgent(Agent):
    """Policy-gradient agent: samples moves from a policy network."""

    def __init__(self, model: Sequential, encoder: SimpleEncoder) -> None:
        super().__init__()
        self.model: Sequential = model
        self.encoder = encoder
        self.collector: ExperienceCollector = None  # set via SetCollector during self-play
        self.temperature = 0.0  # probability of exploring with a uniform distribution

    def Predict(self, game_state: GameState):
        """Return the policy network's move distribution for the given state."""
        encoded_state = self.encoder.Encode(game_state)
        input_tensor = np.array([encoded_state])
        return self.model.predict(input_tensor)[0]

    def SetTemperature(self, temperature):
        self.temperature = temperature

    def SetCollector(self, collector):
        self.collector = collector

    def SelectMove(self, game_state: GameState):
        """Sample a valid, non-eye-filling move from the policy (or uniformly
        with probability `temperature`); pass when nothing is playable."""
        num_moves = self.encoder.board_width * self.encoder.board_height
        board_tensor = self.encoder.Encode(game_state)
        x = np.array([board_tensor])
        if np.random.random() < self.temperature:
            # Exploration: ignore the network and sample uniformly.
            move_probs = np.ones(num_moves) / num_moves
        else:
            move_probs = self.model.predict(x)[0]
        # Clip away exact 0/1 so np.random.choice accepts the distribution.
        eps = 1e-5
        move_probs = np.clip(move_probs, eps, 1 - eps)
        move_probs = move_probs / np.sum(move_probs)
        candidates = np.arange(num_moves)
        ranked_moves = np.random.choice(candidates, num_moves, replace=False, p=move_probs)
        for i in ranked_moves:
            point = self.encoder.DecodePoint(i)
            if game_state.IsValidMove(Move.Play(point)) and not IsPointAnEye(
                    game_state.board, point, game_state.next_player):
                if self.collector is not None:
                    self.collector.RecordDecision(state=board_tensor, action=i)
                return Move.Play(point)
        return Move.Pass()

    def Serialize(self, h5file: File):
        """Write encoder metadata and model weights into an open HDF5 file."""
        h5file.create_group('encoder')
        h5file['encoder'].attrs['name'] = self.encoder.name
        h5file['encoder'].attrs['board_width'] = self.encoder.board_width
        h5file['encoder'].attrs['board_height'] = self.encoder.board_height
        h5file.create_group('model')
        SaveModelToH5File(self.model, h5file['model'])

    def Train(self, experience, lr=0.0000001, clipnorm=1.0, batch_size=512):
        """One epoch of policy-gradient training on collected experience.

        Fixes: `np.zeros(n, num_moves)` passed `num_moves` as the dtype
        argument and raised TypeError — the shape must be a tuple; `SGD(lr=...)`
        is the deprecated keyword, replaced by `learning_rate` as used by the
        other Train methods in this module.
        """
        opt = SGD(learning_rate=lr, clipnorm=clipnorm)
        self.model.compile(loss='categorical_crossentropy', optimizer=opt)
        n = experience.states.shape[0]
        num_moves = self.encoder.board_width * self.encoder.board_height
        # Target matrix: the episode reward in the chosen action's slot, zero elsewhere.
        y = np.zeros((n, num_moves))
        for i in range(n):
            action = experience.actions[i]
            reward = experience.rewards[i]
            y[i][action] = reward
        self.model.fit(experience.states, y, batch_size=batch_size, epochs=1)

def PolicyGradientLoss(y_true, y_pred):
    """Policy-gradient loss: mean over the batch of -sum(target * log(pred))."""
    eps = backend.epsilon()
    clipped = backend.clip(y_pred, eps, 1 - eps)
    per_sample = backend.sum(-y_true * backend.log(clipped), axis=1)
    return backend.mean(per_sample)

def Normalize(x):
    """Scale x so its elements sum to 1."""
    return x / np.sum(x)

def LoadPolicyAgent(h5file: File):
    """Reconstruct a PolicyAgent from an HDF5 file written by Serialize."""
    model = LoadModelFromH5File(
        h5file['model'],
        custom_objects={'policy_gradient_loss': PolicyGradientLoss},
    )
    attrs = h5file['encoder'].attrs
    encoder_name = attrs['name']
    if not isinstance(encoder_name, str):
        # h5py may hand string attributes back as bytes; normalize to str.
        encoder_name = encoder_name.decode('utf-8')
    dims = (attrs['board_width'], attrs['board_height'])
    encoder = DR.GetEncoderByName(encoder_name, dims)
    return PolicyAgent(model, encoder)

class QAgent(Agent):
    """Q-learning agent: scores (state, action) pairs with a two-input model."""

    def __init__(self, model: Sequential, encoder: SimpleEncoder, policy='eps-greedy') -> None:
        super().__init__()
        self.model = model
        self.encoder = encoder
        self.policy = policy
        self.collector = None       # ExperienceCollector, set via SetCollector
        self.temperature = 0.0      # exploration rate
        self.last_move_value = 0    # Q-value of the most recent move, for Diagnostics

    def SetTemperature(self, temperature):
        self.temperature = temperature

    def SetCollector(self, collector):
        self.collector = collector

    def SetPolicy(self, policy):
        """Select the move-ranking policy: 'eps-greedy' or 'weighted'."""
        if policy not in ('eps-greedy', 'weighted'):
            raise ValueError(policy)
        self.policy = policy

    def SelectMove(self, game_state: GameState):
        """Evaluate every legal play with the Q model and take the best-ranked
        move that does not fill one of our own eyes; pass if none qualifies."""
        board_tensor = self.encoder.Encode(game_state)
        moves = []
        board_tensors = []
        for move in game_state.LegalMoves():
            if not move.is_play:
                continue
            moves.append(self.encoder.EncodePoint(move.point))
            board_tensors.append(board_tensor)
        if not moves:
            return Move.Pass()

        num_moves = len(moves)
        board_tensors = np.array(board_tensors)
        # One-hot action vectors, one row per candidate move.
        # Fixed: np.zeros(num_moves, N) passed N as the dtype; shape must be a tuple.
        move_vectors = np.zeros((num_moves, self.encoder.NumberPoints()))
        for i, move in enumerate(moves):
            move_vectors[i][move] = 1

        values = self.model.predict([board_tensors, move_vectors])
        values = values.reshape(len(moves))

        if self.policy == 'eps-greedy':
            ranked_moves = self.RankMovesEpsGreedy(values)
        elif self.policy == 'weighted':
            # Fixed: called self.RankMovesweighted (wrong case), which raised
            # AttributeError; the method is RankMovesWeighted.
            ranked_moves = self.RankMovesWeighted(values)
        else:
            ranked_moves = None

        for mi in ranked_moves:
            point = self.encoder.DecodePoint(moves[mi])
            if not IsPointAnEye(game_state.board, point, game_state.next_player):
                if self.collector is not None:
                    self.collector.RecordDecision(state=board_tensor, action=moves[mi])
                self.last_move_value = float(values[mi])
                return Move.Play(point)
        return Move.Pass()

    def RankMovesEpsGreedy(self, values):
        """Best-first order; with probability `temperature`, rank randomly instead."""
        if np.random.random() < self.temperature:
            values = np.random.random(values.shape)
        ranked_moves = np.argsort(values)
        # argsort is ascending; reverse so the highest value comes first.
        return ranked_moves[::-1]

    def RankMovesWeighted(self, values):
        """Sample a full ordering weighted by temperature-sharpened values."""
        p = values / np.sum(values)
        p = np.power(p, 1.0 / self.temperature)
        p = p / np.sum(p)
        return np.random.choice(np.arange(0, len(values)), size=len(values), p=p, replace=False)

    def Train(self, experience, lr=0.1, batch_size=128):
        """One epoch of Q-value regression toward win (1) / loss (0) targets."""
        opt = SGD(learning_rate=lr)
        self.model.compile(loss='mse', optimizer=opt)
        n = experience.states.shape[0]
        num_moves = self.encoder.NumberPoints()
        y = np.zeros((n,))
        actions = np.zeros((n, num_moves))
        for i in range(n):
            action = experience.actions[i]
            reward = experience.rewards[i]
            actions[i][action] = 1
            y[i] = 1 if reward > 0 else 0
        self.model.fit([experience.states, actions], y, batch_size=batch_size, epochs=1)

    def Serialize(self, f: File):
        """Write encoder metadata and model weights into an open HDF5 file."""
        f.create_group('encoder')
        f['encoder'].attrs['name'] = self.encoder.name
        f['encoder'].attrs['board_width'] = self.encoder.board_width
        f['encoder'].attrs['board_height'] = self.encoder.board_height
        f.create_group('model')
        SaveModelToH5File(self.model, f['model'])

    def Diagnostics(self):
        return {'value': self.last_move_value}
    
def LoadQAgent(f: File):
    """Reconstruct a QAgent from an HDF5 file written by Serialize.

    Fixed: the board height was read from the misspelled attribute key
    'board_heighted', which Serialize never writes ('board_height').
    """
    model = LoadModelFromH5File(f['model'])
    encoder_name = f['encoder'].attrs['name']
    if not isinstance(encoder_name, str):
        encoder_name = encoder_name.decode('utf-8')
    board_width = f['encoder'].attrs['board_width']
    board_height = f['encoder'].attrs['board_height']
    encoder = DR.GetEncoderByName(encoder_name, (board_width, board_height))
    return QAgent(model, encoder)

class ACAgent(Agent):
    """Actor-critic agent: a two-headed model yields move probs and a state value."""

    def __init__(self, model: Sequential, encoder: SimpleEncoder) -> None:
        super().__init__()
        self.model = model
        self.encoder = encoder
        self.collector = None        # ExperienceCollector, set via SetCollector
        self.temperature = 1.0       # sharpening exponent base; 1.0 = use probs as-is
        self.last_state_value = 0    # critic's value of the last evaluated state

    def SetTemperature(self, temperature):
        self.temperature = temperature

    def SetCollector(self, collector):
        self.collector = collector

    def SelectMove(self, game_state: GameState):
        """Sample a valid, non-eye-filling move from the actor head; record the
        critic's value estimate alongside the decision."""
        num_moves = self.encoder.board_width * self.encoder.board_height
        board_tensor = self.encoder.Encode(game_state)
        x = np.array([board_tensor])

        actions, values = self.model.predict(x)
        move_probs = actions[0]
        estimated_value = values[0][0]
        self.last_state_value = float(estimated_value)

        # Temperature-sharpen, then clip away exact 0/1 so np.random.choice
        # accepts the distribution, and renormalize.
        move_probs = np.power(move_probs, 1.0 / self.temperature)
        move_probs = move_probs / np.sum(move_probs)
        eps = 1e-6
        move_probs = np.clip(move_probs, eps, 1 - eps)
        move_probs = move_probs / np.sum(move_probs)

        candidates = np.arange(num_moves)
        ranked_moves = np.random.choice(candidates, num_moves, replace=False, p=move_probs)
        for i in ranked_moves:
            point = self.encoder.DecodePoint(i)
            if game_state.IsValidMove(Move.Play(point)) and not IsPointAnEye(
                game_state.board, point, game_state.next_player):
                if self.collector is not None:
                    self.collector.RecordDecision(state=board_tensor,
                                                  action=i,
                                                  estimated_value=estimated_value)
                return Move.Play(point)
        return Move.Pass()

    def Train(self, experience, lr=0.1, batch_size=128):
        """One epoch of actor-critic training: the policy head target is the
        advantage in the chosen action's slot; the value head regresses toward
        the final reward."""
        opt = SGD(learning_rate=lr, clipvalue=0.2)
        self.model.compile(optimizer=opt, loss=['categorical_crossentropy', 'mse'])
        n = experience.states.shape[0]
        num_moves = self.encoder.NumberPoints()
        policy_target = np.zeros((n, num_moves))
        value_target = np.zeros((n,))
        for i in range(n):
            action = experience.actions[i]
            reward = experience.rewards[i]
            # Fixed: `experience.Advantages` -> `experience.advantages`, matching
            # the lowercase attribute naming used for states/actions/rewards.
            policy_target[i][action] = experience.advantages[i]
            value_target[i] = reward
        self.model.fit(experience.states, [policy_target, value_target],
                       batch_size=batch_size, epochs=1)

    def Serialize(self, f: File):
        """Write encoder metadata and model weights into an open HDF5 file."""
        f.create_group('encoder')
        f['encoder'].attrs['name'] = self.encoder.name
        f['encoder'].attrs['board_width'] = self.encoder.board_width
        f['encoder'].attrs['board_height'] = self.encoder.board_height
        f.create_group('model')
        SaveModelToH5File(self.model, f['model'])

    def Diagnostics(self):
        return {'value': self.last_state_value}
    
def LoadACAgent(f: File):
    """Reconstruct an ACAgent from an HDF5 file written by Serialize."""
    model = LoadModelFromH5File(f['model'])
    encoder_name = f['encoder'].attrs['name']
    if not isinstance(encoder_name, str):
        encoder_name = encoder_name.decode('utf-8')
    board_width = f['encoder'].attrs['board_width']
    board_height = f['encoder'].attrs['board_height']
    encoder = DR.GetEncoderByName(encoder_name, (board_width, board_height))
    return ACAgent(model, encoder)


# Backward-compatible alias for the original misspelled name.
LoadACAgnet = LoadACAgent

class ValueAgent(Agent):
    """Agent that scores each candidate move by valuing the position it leads to."""

    def __init__(self, model: Sequential, encoder: SimpleEncoder, policy='eps-greedy') -> None:
        super().__init__()
        self.model = model
        self.encoder = encoder
        self.policy = policy
        self.collector: ExperienceCollector = None
        self.temperature = 0.0      # exploration rate for eps-greedy ranking
        self.last_move_value = 0    # value of the most recent move, for Diagnostics

    def Predict(self, game_state: GameState):
        """Return the model's value estimate for the given state."""
        encoded_state = self.encoder.Encode(game_state)
        input_tensor = np.array([encoded_state])
        return self.model.predict(input_tensor)[0]

    def SetTemperature(self, temperature):
        self.temperature = temperature

    def SetCollector(self, collector):
        self.collector = collector

    def SetPolicy(self, policy):
        """Select the move-ranking policy: 'eps-greedy' or 'weighted'."""
        if policy not in ('eps-greedy', 'weighted'):
            raise ValueError(policy)
        self.policy = policy

    def SelectMove(self, game_state: GameState):
        """Value the position after each legal play and take the best-ranked
        move that does not fill one of our own eyes; pass if none qualifies."""
        moves = []
        board_tensors = []
        for move in game_state.LegalMoves():
            # Fixed: the original tested `not move.is_pass`, which kept only
            # pass/resign moves (which have no .point) and skipped every
            # playable move; only play moves can be evaluated.
            if not move.is_play:
                continue
            next_state = game_state.ApplyMove(move)
            board_tensors.append(self.encoder.Encode(next_state))
            moves.append(move)
        if not moves:
            return Move.Pass()

        board_tensors = np.array(board_tensors)
        # The model values each position from the opponent's point of view
        # (it is their turn after our move); our value is the complement.
        opp_values = self.model.predict(board_tensors)
        opp_values = opp_values.reshape(len(moves))
        values = 1 - opp_values

        if self.policy == 'eps-greedy':
            ranked_moves = self.RankMoveEpsGreedy(values)
        elif self.policy == 'weighted':
            ranked_moves = self.RankMoveWeighted(values)
        else:
            ranked_moves = None

        for i in ranked_moves:
            move = moves[i]
            if not IsPointAnEye(game_state.board, move.point, game_state.next_player):
                if self.collector is not None:
                    # Fixed: originally recorded a stale loop variable as the
                    # state and called Encode() (which expects a game state)
                    # on a Point; record the current state's encoding and the
                    # point's index via EncodePoint.
                    self.collector.RecordDecision(
                        state=self.encoder.Encode(game_state),
                        action=self.encoder.EncodePoint(move.point),
                    )
                self.last_move_value = float(values[i])
                return move

        return Move.Pass()

    def RankMoveWeighted(self, values):
        """Sample a full ordering weighted by temperature-sharpened values."""
        p = values / np.sum(values)
        p = np.power(p, 1.0 / self.temperature)
        p = p / np.sum(p)
        return np.random.choice(np.arange(0, len(values)), size=len(values), p=p, replace=False)

    def RankMoveEpsGreedy(self, values):
        """Best-first order; with probability `temperature`, rank randomly instead.

        Fixed: `np/random.random(...)` was a typo for `np.random.random(...)`.
        """
        if np.random.random() < self.temperature:
            values = np.random.random(values.shape)
        ranked_moves = np.argsort(values)
        # argsort is ascending; reverse so the highest value comes first.
        return ranked_moves[::-1]

    def Train(self, experience, lr=0.1, batch_size=128):
        """One epoch of value regression toward win (1) / loss (0) targets.

        Fixed: `compile(lose='mse')` was a typo for the `loss` keyword.
        """
        opt = SGD(learning_rate=lr)
        self.model.compile(loss='mse', optimizer=opt)
        n = experience.states.shape[0]
        y = np.zeros((n,))
        for i in range(n):
            reward = experience.rewards[i]
            y[i] = 1 if reward > 0 else 0
        self.model.fit(experience.states, y, batch_size=batch_size, epochs=1)

    def Serialize(self, f: File):
        """Write encoder metadata and model weights into an open HDF5 file."""
        f.create_group('encoder')
        f['encoder'].attrs['name'] = self.encoder.name
        f['encoder'].attrs['board_width'] = self.encoder.board_width
        f['encoder'].attrs['board_height'] = self.encoder.board_height
        f.create_group('model')
        SaveModelToH5File(self.model, f['model'])

    def Diagnostics(self):
        return {'value': self.last_move_value}

def LoadValueAgent(f: File):
    """Reconstruct a ValueAgent from an HDF5 file written by Serialize.

    Fixes: removed a stray `self` parameter (this is a module-level function,
    like the other Load* loaders, and was uncallable as written) and corrected
    the misspelled 'board_widht' attribute key, which Serialize never writes.
    """
    model = LoadModelFromH5File(f['model'])
    encoder_name = f['encoder'].attrs['name']
    if not isinstance(encoder_name, str):
        encoder_name = encoder_name.decode('utf-8')
    board_width = f['encoder'].attrs['board_width']
    board_height = f['encoder'].attrs['board_height']
    encoder = DR.GetEncoderByName(encoder_name, (board_width, board_height))
    return ValueAgent(model, encoder)
