import random, collections
import copy
import numpy.random as random
import numpy as np
import time
# Lightweight (action, probs) record returned by Agent.step implementations.
# The typename must match the module-level binding: a mismatched typename
# ("step_output") breaks pickling of instances and produces a misleading repr.
StepOutput = collections.namedtuple("StepOutput", ["action", "probs"])

def random_value(time_step, player_id):
    """Baseline value estimate: zero-mean Gaussian noise with sigma 0.3.

    Ignores both arguments; used as the default when no value network is given.
    """
    return np.random.normal(scale=0.3)

def random_policy(time_step, player_id):
    """Return (action, weight) pairs for *player_id*'s legal actions.

    Each legal action gets an i.i.d. uniform [0, 1) weight; used as the
    default expansion/rollout policy when none is supplied.
    """
    legal = time_step.observations["legal_actions"][player_id]
    return [(action, random.rand()) for action in legal]

class Agent(object):
    """Abstract base class for game-playing agents."""

    def __init__(self):
        pass

    def step(self, timestep):
        """Choose an action for *timestep*; subclasses must override."""
        raise NotImplementedError


class RandomAgent(Agent):
    """Agent that plays a uniformly random legal action."""

    def __init__(self, _id):
        super().__init__()
        self.player_id = _id

    def step(self, timestep, env=None):
        """Return a StepOutput with a random legal action for the current player."""
        player = timestep.observations["current_player"]
        legal = timestep.observations["legal_actions"][player]
        return StepOutput(action=random.choice(legal), probs=1.0)

class MCTSAgent(Agent):
    """Agent that chooses actions by running Monte-Carlo Tree Search.

    Any of the three hooks (value_fn, policy_fn, rollout_policy) may be
    falsy, in which case the module-level random baselines are used.
    """

    def __init__(self, _id, value_fn, policy_fn, rollout_policy, lamda, n_playout, rollout_depth=100):
        super().__init__()
        # Substitute the random baselines for any hook the caller left unset.
        self._value_fn = value_fn or random_value
        self._policy_fn = policy_fn or random_policy
        self._rollout_policy = rollout_policy or random_policy
        self._lambda = lamda            # mixing weight between rollout and value net
        self._n_playout = n_playout     # simulations per move
        self._rollout_depth = rollout_depth
        self.mcts = MCTS(
            _id,
            self._value_fn,
            self._policy_fn,
            self._rollout_policy,
            self._lambda,
            self._n_playout,
            self._rollout_depth,
        )
        self._player_id = _id

    def step(self, time_step, env):
        """Run a full search from *time_step* on a copy of *env* and act."""
        return StepOutput(action=self.mcts.move(time_step, env), probs=1.0)


class TreeNode(object):
    """A node in the MCTS search tree.

    Tracks the running mean value Q, the prior probability P, the
    exploration bonus u, and the visit count N.
    """

    def __init__(self, parent, P):
        self._parent = parent
        self._children = dict()       # action -> TreeNode
        self._P = P                   # prior probability from the policy
        self._Q = 0                   # running mean of backed-up values
        self._u = P                   # exploration bonus (refreshed on each update)
        self._N = 0                   # visit count
        self._fully_explored = False  # True once every child has been visited

    def is_root(self):
        """A node with no parent is the root."""
        return self._parent is None

    def is_leaf(self):
        """A node with no children is a leaf."""
        return not self._children

    def expand(self, actions):
        """Create one child per (action, prob) pair, skipping zero-probability moves."""
        for move, prior in actions:
            if prior == 0:
                continue
            self._children[move] = TreeNode(self, prior)

    def get_Q(self):
        return self._Q

    def get_u(self):
        return self._u

    def get_N(self):
        return self._N

    def get_succ(self):
        """Select a successor: an unvisited child first, else the UCB-best child.

        Returns an (action, child) pair, or None when called on a leaf.
        """
        if self.is_leaf():
            return None
        # Prefer any child that has never been visited.
        if not self._fully_explored:
            for move, child in self._children.items():
                if child.get_N() == 0:
                    return move, child
        # All children visited at least once: maximise Q + u.
        self._fully_explored = True
        entries = list(self._children.items())
        random.shuffle(entries)  # random tie-breaking among equal scores
        return max(entries, key=lambda e: e[1].get_Q() + e[1].get_u())

    def get_most_visited_child(self):
        """Return the (action, child) pair with the highest visit count."""
        if self.is_leaf():
            raise ValueError
        entries = list(self._children.items())
        random.shuffle(entries)  # random tie-breaking among equal counts
        return max(entries, key=lambda e: e[1].get_N())

    def update_Q(self, V):
        """Fold value V into the running mean and refresh the exploration bonus."""
        self._Q = (self._Q * self._N + V) / (self._N + 1)
        self._N = self._N + 1
        self._u = 6 * self._P / (1 + self._N)  # bonus decays with visits

    def update_Q_along_path(self, V):
        """Back V up from this node to the root."""
        node = self
        while node is not None:
            node.update_Q(V)
            node = node._parent

class MCTS(object):
    """Monte-Carlo Tree Search over a deep-copyable environment."""

    def __init__(self, id, value, policy, rollout_policy, lamda, n_playout, rollout_depth):
        self._root = TreeNode(None, 1)
        self._value = value                    # leaf evaluation function
        self._policy = policy                  # expansion prior function
        self._rollout_policy = rollout_policy  # policy used during rollouts
        self._lambda = lamda                   # mixing weight: rollout vs value estimate
        self._n_playout = n_playout            # simulations per move
        self._id = id                          # player this search acts for
        self._cur_player = id
        self._rollout_depth = rollout_depth    # rollout horizon

    def _playout(self, time_step, envcpy):
        """Run one simulation: select down to a leaf, expand/evaluate, back up."""
        self._cur_player = self._id
        node = self._root
        # Selection phase: follow UCB choices until a leaf is reached.
        while not node.is_leaf():
            action, node = node.get_succ()
            time_step = envcpy.step(action)
            self._cur_player = 0 if self._cur_player else 1  # toggle 0 <-> 1
        v_theta = self._value(time_step, self._cur_player)
        if time_step.last():
            reward = time_step.rewards[self._id]
        else:
            # Expansion, then rollout evaluation from the new leaf.
            node.expand(self._policy(time_step, self._cur_player))
            reward = self._rollout(time_step, envcpy)
        # Mix the rollout return with the value estimate and back it up.
        node.update_Q_along_path(self._lambda * reward + (1 - self._lambda) * v_theta)

    def _rollout(self, time_step, envcpy):
        """Play greedily w.r.t. the rollout policy up to the depth limit; return reward."""
        depth_left = self._rollout_depth
        while not time_step.last() and depth_left > 0:
            scored = self._rollout_policy(time_step, self._cur_player)
            random.shuffle(scored)  # random tie-breaking among equal weights
            scored.sort(key=lambda pair: pair[1], reverse=True)
            time_step = envcpy.step(scored[0][0])
            self._cur_player = 0 if self._cur_player else 1  # toggle 0 <-> 1
            depth_left -= 1
        # NOTE(review): when the depth limit is hit before a terminal state this
        # still reads rewards — assumes mid-game rewards are available; confirm.
        return time_step.rewards[self._id]

    def move(self, time_step, env):
        """Rebuild the tree, run n_playout simulations, return the most visited action."""
        self._root = TreeNode(None, 1)
        for _ in range(self._n_playout):
            self._playout(time_step, copy.deepcopy(env))
        action, _child = self._root.get_most_visited_child()
        return action
        
        