from collections import defaultdict

from template import Agent
from Sequence.sequence_model import SequenceGameRule as GameRule
import random
import time
import math
import copy


THINKTIME   = 0.9
NUM_PLAYERS = 2
JOKER   = '#'
EMPTY   = '_'

# Use a custom game rule subclass so generateSuccessor can be simulated
# repeatedly without touching the real deck (avoids errors during search).
class MyGameRule(GameRule):
    """GameRule variant whose generateSuccessor works on deep copies.

    The hand update is abstracted (the played card is simply swapped for the
    chosen draft card) so the search can simulate moves without consuming the
    real deck.
    """

    def generateSuccessor(self, state, action, agent_id):
        """Return a new state resulting from `agent_id` taking `action`.

        The input state is never mutated; a deep copy is modified and
        returned. The acting agent's score is incremented by the number of
        sequences the action completes.
        """
        new_state = copy.deepcopy(state)
        new_state.board.new_seq = False

        plr_state = new_state.agents[agent_id]
        plr_state.last_action = action
        reward = 0

        card = action['play_card']
        draft = action['draft_card']

        # --- Abstracted hand update (avoids the real deck) ---
        # Discard the played card and take the chosen draft card instead of
        # drawing from a deck.
        if card:
            if card in plr_state.hand:
                plr_state.hand.remove(card)
            plr_state.discard = card
            new_state.board.draft.remove(draft)
            # Simulated draw: replace with the selected draft card.
            if draft not in plr_state.hand:
                plr_state.hand.append(draft)

        # If action was to trade in a dead card, action is complete, and agent gets to play another card.
        if action['type'] == 'trade':
            plr_state.trade = True  # Switch trade flag to prohibit agent performing a second trade this turn.
            plr_state.agent_trace.action_reward.append(
                (action, reward))  # Log this turn's action and score (zero).
            return new_state

        # Update Sequence board. If action was to place/remove a marker, add/subtract it from the board.
        r, c = action['coords']
        if action['type'] == 'place':
            new_state.board.chips[r][c] = plr_state.colour
            new_state.board.empty_coords.remove(action['coords'])
            new_state.board.plr_coords[plr_state.colour].append(action['coords'])
        elif action['type'] == 'remove':
            new_state.board.chips[r][c] = EMPTY
            new_state.board.empty_coords.append(action['coords'])
            new_state.board.plr_coords[plr_state.opp_colour].remove(action['coords'])
        else:
            print("Action unrecognised.")

        # Check if a sequence has just been completed. If so, upgrade chips to special sequence chips.
        if action['type'] == 'place':
            seq, seq_type = self.checkSeq(new_state.board.chips, plr_state, (r, c))
            if seq:
                reward += seq['num_seq']
                new_state.board.new_seq = seq_type
                for sequence in seq['coords']:
                    # Use distinct loop names so the placed coords (r, c)
                    # above are not shadowed.
                    for sr, sc in sequence:
                        if new_state.board.chips[sr][sc] != JOKER:  # Joker spaces stay jokers.
                            new_state.board.chips[sr][sc] = plr_state.seq_colour
                            # NOTE(review): this removes the *placed* coord,
                            # not (sr, sc) -- confirm against the upstream
                            # rule; only ValueError (already removed) is
                            # expected here.
                            try:
                                new_state.board.plr_coords[plr_state.colour].remove(action['coords'])
                            except ValueError:  # Chip coords were already removed with the first sequence.
                                pass
                plr_state.completed_seqs += seq['num_seq']
                plr_state.seq_orientations.extend(seq['orientation'])

        plr_state.trade = False  # Reset trade flag if agent has completed a full turn.
        plr_state.agent_trace.action_reward.append(
            (action, reward))  # Log this turn's action and any resultant score.
        plr_state.score += reward
        return new_state

# Tabular store of learned Q-values for (state, action) pairs.
class QTable():
    """Tabular Q-value store keyed by (state, serialized action).

    Unknown entries default to ``default_q_value``; each update moves the
    stored value by ``alpha * delta``.
    """

    def __init__(self, alpha=1, default_q_value=0.0):
        # defaultdict lets lookups of unseen pairs return the default
        # without inserting explicit initialization logic everywhere.
        self.qtable = defaultdict(lambda: default_q_value)
        self.alpha = alpha

    def make_action_key(self, action):
        # Actions are dicts (unhashable), so serialize them into a string
        # that can participate in the dictionary key.
        return str(action)

    def update(self, state, action, delta):
        """Apply a learning-rate-scaled delta to one (state, action) entry."""
        entry = (state, self.make_action_key(action))
        self.qtable[entry] += self.alpha * delta

    def batch_update(self, states, actions, deltas):
        """Apply update() element-wise over parallel sequences."""
        for s, a, d in zip(states, actions, deltas):
            self.update(s, a, d)

    def get_q_value(self, state, action):
        """Return the stored Q-value (or the default for unseen pairs)."""
        return self.qtable[(state, self.make_action_key(action))]

    def get_q_values(self, states, actions):
        """Vectorized get_q_value over parallel sequences."""
        return [self.get_q_value(s, a) for s, a in zip(states, actions)]


class MCTSNode:
    """A node in the MCTS search tree, seen from a single agent's perspective."""

    def __init__(self, agent_id, state, q_table, parent=None, action=None):
        self.id = agent_id
        self.state = state
        self.qTable = q_table          # Shared Q-table across the whole tree.
        self.parent = parent
        self.action = action           # Action that led from parent to here.
        self.children = []
        self.visits = 0                # Visit count N(node).
        self.wins = 0                  # Accumulated rollout results.
        self.game_rule = MyGameRule(NUM_PLAYERS)
        self.untried_actions = self.game_rule.getLegalActions(state, self.id)

    def is_fully_expanded(self):
        """Return True once every legal action has produced a child node."""
        return len(self.untried_actions) == 0

    def uct_select(self, c=math.sqrt(2)):
        """Select a child by UCT, breaking ties (and unvisited ties) randomly."""
        best_score = float('-inf')
        best_children = []

        # Pick uniformly among the children with the maximal UCT score.
        for child in self.children:
            if child.visits == 0:
                uct_score = float('inf')  # Always explore unvisited children first.
            else:
                q_value = self.qTable.get_q_value(child.state, child.action)
                uct_score = q_value + c * math.sqrt(2 * math.log(self.visits) / child.visits)

            if uct_score > best_score:
                best_score = uct_score
                best_children = [child]
            elif uct_score == best_score:
                best_children.append(child)

        return random.choice(best_children)

    def expand(self):
        """Expand one randomly chosen untried action; return the new child."""
        action = random.choice(self.untried_actions)
        self.untried_actions.remove(action)
        next_state = self.game_rule.generateSuccessor(self.state, action, self.id)
        child_node = MCTSNode(self.id, next_state, self.qTable, parent=self, action=action)
        self.children.append(child_node)
        return child_node

    def back_propagate(self, result):
        """Propagate a rollout result up to the root, updating Q-values."""
        self.visits += 1
        self.wins += result

        # Incremental mean update: Q <- Q + (result - Q) / N.
        q_value = self.qTable.get_q_value(self.state, self.action)
        delta = (1 / self.visits) * (result - q_value)
        self.qTable.update(self.state, self.action, delta)

        if self.parent:
            self.parent.back_propagate(result)

    def is_terminal_node(self):
        """Return whether the search should stop expanding from this node."""
        # NOTE(review): gameEnds() is called without a state argument here,
        # unlike the (commented-out) usage elsewhere -- confirm its signature.
        return self.game_rule.gameEnds()

    def rollout(self):
        """Simulate up to 5 random moves for this agent; return its score."""
        current_state = copy.deepcopy(self.state)
        depth = 0
        # Depth-limited playout: stop after 5 plies or once the agent has
        # completed 2 sequences (the win condition for this agent).
        while current_state.agents[self.id].completed_seqs < 2 and depth < 5:
            legal_actions = self.game_rule.getLegalActions(current_state, self.id)
            if not legal_actions:
                break
            action = random.choice(legal_actions)
            current_state = self.game_rule.generateSuccessor(current_state, action, self.id)
            depth += 1
        return current_state.agents[self.id].score

    def get_result(self, final_state):
        """Score a finished game: 1 for a win, 0.5 draw, 0 loss for our team."""
        team = self.state.agents[self.state.agent_to_move].team
        scores = final_state.getScores()
        if scores[team] > scores[1 - team]:
            return 1
        elif scores[team] == scores[1 - team]:
            return 0.5
        return 0

class myAgent(Agent):
    """MCTS-based Sequence agent; the Q-table persists across turns."""

    def __init__(self, _id):
        super().__init__(_id)
        self.id = _id
        self.game_rule = MyGameRule(NUM_PLAYERS)
        self.qtable = QTable()  # Shared with every search tree we build.

    # Legal actions for this agent in the given state.
    def GetActions(self, state):
        return self.game_rule.getLegalActions(state, self.id)

    def SelectAction(self, actions, game_state):
        """Run time-bounded MCTS from game_state and return the chosen action.

        Falls back to a uniformly random legal action if the time budget
        expired before any child was expanded.
        """
        start_time = time.time()
        root = MCTSNode(self.id, game_state, self.qtable)

        while time.time() - start_time < THINKTIME:
            # Selection: descend through fully-expanded nodes via UCT.
            node = root
            while node.children and node.is_fully_expanded():
                node = node.uct_select()

            # Expansion: add one child unless the game has ended.
            if not node.is_terminal_node():
                node = node.expand()

            # Simulation: random depth-limited playout from the new node.
            result = node.rollout()

            # Backpropagation: update visit counts and Q-values up to the root.
            node.back_propagate(result)

        action = random.choice(actions)  # Fallback if the tree stayed empty.
        if root.children:
            # Among the most-visited children, prefer the highest learned
            # Q-value as the tie-breaker.
            max_visits = max(child.visits for child in root.children)
            most_visited = [child for child in root.children
                            if child.visits == max_visits]
            best_child = max(most_visited,
                             key=lambda child: self.qtable.get_q_value(child.state, child.action))
            action = best_child.action

        return action


