from collections import defaultdict

from template import Agent
from Sequence.sequence_model import SequenceGameRule as GameRule
import random
import time
import math
import copy


# Search budget and board symbols.
THINKTIME = 0.9      # seconds of thinking time per move
NUM_PLAYERS = 2
JOKER = '#'
EMPTY = '_'

# Positional weight table: squares nearer the centre are worth more,
# corners (joker spaces) are worth nothing.
center_bonus = [
    [0, 4, 5, 6, 8, 8, 6, 5, 4, 0],
    [4, 6, 7, 9, 11, 11, 9, 7, 6, 4],
    [5, 7, 10, 12, 14, 14, 12, 10, 7, 5],
    [6, 9, 12, 15, 17, 17, 15, 12, 9, 6],
    [8, 11, 14, 17, 20, 20, 17, 14, 11, 8],
    [8, 11, 14, 17, 20, 20, 17, 14, 11, 8],
    [6, 9, 12, 15, 17, 17, 15, 12, 9, 6],
    [5, 7, 10, 12, 14, 14, 12, 10, 7, 5],
    [4, 6, 7, 9, 11, 11, 9, 7, 6, 4],
    [0, 4, 5, 6, 8, 8, 6, 5, 4, 0],
]

# The four line orientations to scan: vertical, horizontal, both diagonals.
DIRECTIONS = [(1, 0), (0, 1), (1, 1), (1, -1)]

# Card layout of the 10x10 Sequence board ('jk' marks the joker corners).
BOARD = [['jk','2s','3s','4s','5s','6s','7s','8s','9s','jk'],
         ['6c','5c','4c','3c','2c','ah','kh','qh','th','ts'],
         ['7c','as','2d','3d','4d','5d','6d','7d','9h','qs'],
         ['8c','ks','6c','5c','4c','3c','2c','8d','8h','ks'],
         ['9c','qs','7c','6h','5h','4h','ah','9d','7h','as'],
         ['tc','ts','8c','7h','2h','3h','kh','td','6h','2d'],
         ['qc','9s','9c','8h','9h','th','qh','qd','5h','3d'],
         ['kc','8s','tc','qc','kc','ac','ad','kd','4h','4d'],
         ['ac','7s','6s','5s','4s','3s','2s','2h','3h','5d'],
         ['jk','ad','kd','qd','td','9d','8d','7d','6d','jk']]

# Map each card name to every coordinate where it appears, for O(1) lookup.
COORDS = defaultdict(list)
for r, board_row in enumerate(BOARD):
    for c, card in enumerate(board_row):
        COORDS[card].append((r, c))

# Custom game rule: overrides generateSuccessor so it can be applied safely to
# deep-copied simulation states (the base rule's deck handling errors out there).
class MyGameRule(GameRule):
    def generateSuccessor(self, state, action, agent_id):
        """Return a deep-copied successor of ``state`` after ``agent_id`` performs ``action``.

        The hand update is abstracted to avoid touching a real deck: the played
        card is discarded and the chosen draft card goes straight into the hand.
        This makes the method safe to call repeatedly during MCTS simulation.

        Rewards (completed sequences) are logged on the agent's trace and added
        to its score, mirroring the base rule's bookkeeping.
        """
        new_state = copy.deepcopy(state)
        new_state.board.new_seq = False

        plr_state = new_state.agents[agent_id]
        plr_state.last_action = action
        reward = 0

        # NOTE: the original code scanned the board for unclaimed cards and drew
        # a random one here, but the result was never used and random.choice
        # could raise IndexError on an empty list late in the game; removed.

        card = action['play_card']
        draft = action['draft_card']

        # Abstracted hand update (no real deck): discard the played card and
        # take the selected draft card into the hand.
        if card:
            if card in plr_state.hand:
                plr_state.hand.remove(card)
            plr_state.discard = card
            new_state.board.draft.remove(draft)
            plr_state.hand.append(draft)

        # Trading in a dead card completes this action; the agent then gets to
        # play another card on the same turn.
        if action['type'] == 'trade':
            plr_state.trade = True  # Prohibit a second trade this turn.
            plr_state.agent_trace.action_reward.append(
                (action, reward))  # Log this turn's action and score (zero).
            return new_state

        # Update the Sequence board: place our chip or remove an opponent chip.
        r, c = action['coords']
        if action['type'] == 'place':
            new_state.board.chips[r][c] = plr_state.colour
            new_state.board.empty_coords.remove(action['coords'])
            new_state.board.plr_coords[plr_state.colour].append(action['coords'])
        elif action['type'] == 'remove':
            new_state.board.chips[r][c] = EMPTY
            new_state.board.empty_coords.append(action['coords'])
            new_state.board.plr_coords[plr_state.opp_colour].remove(action['coords'])
        else:
            print("Action unrecognised.")

        # If the placement completed a sequence, upgrade its chips to special
        # sequence chips (joker corners stay jokers).
        if action['type'] == 'place':
            seq, seq_type = self.checkSeq(new_state.board.chips, plr_state, (r, c))
            if seq:
                reward += seq['num_seq']
                new_state.board.new_seq = seq_type
                for sequence in seq['coords']:
                    for r, c in sequence:
                        if new_state.board.chips[r][c] != JOKER:  # Joker spaces stay jokers.
                            new_state.board.chips[r][c] = plr_state.seq_colour
                            try:
                                new_state.board.plr_coords[plr_state.colour].remove(action['coords'])
                            except ValueError:  # Chip coords were already removed with the first sequence.
                                pass
                plr_state.completed_seqs += seq['num_seq']
                plr_state.seq_orientations.extend(seq['orientation'])

        plr_state.trade = False  # Full turn completed; reset trade flag.
        plr_state.agent_trace.action_reward.append(
            (action, reward))  # Log this turn's action and any resultant score.
        plr_state.score += reward
        return new_state

# Tabular Q-value store keyed by (state, stringified action).
class QTable():
    def __init__(self, alpha=1, default_q_value=0.0):
        """Create an empty table; unseen (state, action) pairs read as ``default_q_value``."""
        self.qtable = defaultdict(lambda: default_q_value)
        self.alpha = alpha  # learning rate applied to each delta


    def update(self, state, action, delta):
        """Apply one learning step: Q(s, a) += alpha * delta."""
        entry = (state, self.make_action_key(action))
        self.qtable[entry] += self.alpha * delta

    def batch_update(self, states, actions, deltas):
        """Apply ``update`` element-wise across three parallel sequences."""
        for triple in zip(states, actions, deltas):
            self.update(*triple)

    def get_q_value(self, state, action):
        """Return Q(s, a), inserting the default for unseen pairs."""
        entry = (state, self.make_action_key(action))
        return self.qtable[entry]

    def get_q_values(self, states, actions):
        """Return Q-values for parallel sequences of states and actions."""
        return [self.get_q_value(s, a) for s, a in zip(states, actions)]

    def make_action_key(self, action):
        """Actions are dicts (unhashable), so key them by their string form."""
        return str(action)

class MCTSNode:
    """One node of the MCTS tree: a game state plus search statistics.

    ``wins`` accumulates rollout results and ``visits`` counts backups, so
    ``wins / visits`` is the node's empirical value estimate.
    """

    def __init__(self, agent_id, state, q_table, parent=None, action=None):
        self.id = agent_id          # id of the agent this tree searches for
        self.state = state
        self.qTable = q_table       # kept for compatibility; UCT uses wins/visits
        self.parent = parent
        self.action = action        # action that led from parent to this node
        self.children = []
        self.visits = 0             # visit count N()
        self.wins = 0               # accumulated rollout reward
        self.game_rule = MyGameRule(NUM_PLAYERS)
        self.untried_actions = self.game_rule.getLegalActions(state, self.id)

    def is_fully_expanded(self):
        """True once every legal action from this node has been expanded."""
        return len(self.untried_actions) == 0

    def uct_select(self, c=math.sqrt(2)):
        """Select a child by UCT; unvisited children score +inf, ties break randomly."""
        best_score = float('-inf')
        best_children = []

        for child in self.children:
            if child.visits == 0:
                uct_score = float('inf')
            else:
                q_value = child.wins / child.visits
                uct_score = q_value + c * math.sqrt(2 * math.log(self.visits) / child.visits)

            if uct_score > best_score:
                best_score = uct_score
                best_children = [child]
            elif uct_score == best_score:
                best_children.append(child)

        return random.choice(best_children)

    def expand(self):
        """Expand one untried action (chosen by the heuristic) and return the new child."""
        action = self.action_choose(self.untried_actions, self.state, self.id)
        self.untried_actions.remove(action)

        next_state = self.game_rule.generateSuccessor(self.state, action, self.id)
        child_node = MCTSNode(self.id, next_state, self.qTable, parent=self, action=action)
        self.children.append(child_node)
        return child_node

    def back_propagate(self, result):
        """Add ``result`` to this node's statistics and recurse up to the root."""
        self.visits += 1
        self.wins += result

        if self.parent:
            self.parent.back_propagate(result)

    def is_terminal_node(self):
        """True if the game rule reports the game has ended."""
        return self.game_rule.gameEnds()

    def rollout2(self):
        """Heuristic playout until either side completes two sequences.

        Returns ``(value, no_score_flag)`` where ``value`` is our score minus
        the opponent's, and the flag is 1 when neither side scored at all.
        """
        current_state = copy.deepcopy(self.state)
        enemy_id = 1 - self.id
        # NOTE(review): the opponent's hand is forced to a single two-eyed jack
        # ('jc', playable anywhere) so it always has a legal move — a rollout
        # shortcut; confirm this approximation is intended.
        current_state.agents[enemy_id].hand = ['jc']
        while current_state.agents[enemy_id].completed_seqs < 2 and current_state.agents[self.id].completed_seqs < 2:
            legal_actions = self.game_rule.getLegalActions(current_state, enemy_id)
            if not legal_actions:
                break
            action = self.action_choose(legal_actions, current_state, enemy_id)
            current_state = self.game_rule.generateSuccessor(current_state, action, enemy_id)

            legal_actions = self.game_rule.getLegalActions(current_state, self.id)
            if not legal_actions:
                break
            action = self.action_choose(legal_actions, current_state, self.id)
            current_state = self.game_rule.generateSuccessor(current_state, action, self.id)

        if current_state.agents[self.id].score == 0 and current_state.agents[enemy_id].score == 0:
            return 0, 1
        # BUG FIX: the original subtracted the agent's own score from itself,
        # which is always 0 — every decisive rollout looked like a draw.
        return current_state.agents[self.id].score - current_state.agents[enemy_id].score, 0

    def count_consecutive(self, board, r, c, colour):
        """Return the longest run through (r, c) for ``colour`` over all four directions.

        Runs of 5 or more are boosted to 10 so sequence-completing placements
        dominate the heuristic. NOTE(review): 'X'/'O' appear to be the special
        sequence-chip markers, one of which may be counted once as a wildcard;
        jokers ('#') only count in the backward scan, and ``alt_colour`` is not
        reset before each direction's forward scan — confirm both are intended.
        """
        max_consecutive = 0
        if colour == 'r':
            alt_colour = 'X'
        else:
            alt_colour = 'O'
        for dr, dc in DIRECTIONS:
            count = 1
            # Forward scan.
            nr, nc = r + dr, c + dc
            while 0 <= nr < 10 and 0 <= nc < 10 and (board[nr][nc] == colour or board[nr][nc] == alt_colour):
                count += 1
                if board[nr][nc] == alt_colour:
                    alt_colour = colour
                nr += dr
                nc += dc
            # Backward scan (jokers also count here).
            if colour == 'r':
                alt_colour = 'X'
            else:
                alt_colour = 'O'
            nr, nc = r - dr, c - dc
            while 0 <= nr < 10 and 0 <= nc < 10 and (board[nr][nc] == colour or board[nr][nc] == alt_colour or board[nr][nc] == '#'):
                count += 1
                if board[nr][nc] == alt_colour:
                    alt_colour = colour
                nr -= dr
                nc -= dc
            max_consecutive = max(max_consecutive, count)
            if max_consecutive >= 5:
                max_consecutive = 10
        return max_consecutive

    def action_choose(self, actions, state, self_id):
        """Greedily pick the action maximising a heuristic score.

        Score = own line length at the placement + the opponent's best line
        the drafted card would deny them + a centre-position bonus, with a
        large bonus for drafting a jack ('jd'/'jc').
        """
        best_score = -float('inf')
        best_action = None
        board = state.board.chips

        for action in actions:
            # Potential line the opponent could make wherever the draft card plays.
            # (The original computed this twice into draft_score1/draft_score2 with
            # identical expressions; 0.5*(s+s) collapses to a single term.)
            draft_card = action['draft_card']
            draft_score = 0
            for dr, dc in COORDS.get(draft_card, []):
                if board[dr][dc] == '_':
                    draft_score = max(draft_score,
                                      self.count_consecutive(board, dr, dc, state.agents[1 - self_id].colour))

            action_score = 0
            center_bonus_value = 0
            if action['type'] != 'trade':
                r, c = action['coords']
                # Line length we achieve by placing here.
                action_score = self.count_consecutive(board, r, c, state.agents[self_id].colour)
                center_bonus_value = center_bonus[r][c]

            total_score = action_score + draft_score + center_bonus_value

            # Jacks are wild/removal cards: always prefer drafting one.
            if draft_card == 'jd' or draft_card == 'jc':
                total_score += 1000

            if total_score > best_score:
                best_score = total_score
                best_action = action
        return best_action


class myAgent(Agent):
    """MCTS-based Sequence agent with a heuristic rollout policy."""

    def __init__(self, _id):
        super().__init__(_id)
        self.id = _id
        self.game_rule = MyGameRule(NUM_PLAYERS)
        self.qtable = QTable()

    def GetActions(self, state):
        """Return the legal actions for this agent in ``state``."""
        return self.game_rule.getLegalActions(state, self.id)

    def SelectAction(self, actions, game_state):
        """Run time-bounded MCTS from ``game_state`` and return the best action.

        Falls back to a uniformly random legal action if no child was expanded
        within the time budget, and to ``None`` if there are no legal actions
        (the original called random.choice first, which would raise IndexError
        on an empty list before its None check could run).
        """
        if not actions:
            return None

        start_time = time.time()
        root = MCTSNode(self.id, game_state, self.qtable)

        while time.time() - start_time < THINKTIME:
            # Selection: descend while the node is fully expanded.
            node = root
            while node.children and node.is_fully_expanded():
                node = node.uct_select()

            # Expansion: add one child unless the game has ended.
            if not node.is_terminal_node():
                node = node.expand()

            # Simulation: heuristic rollout from the new node.
            result, _ = node.rollout2()

            # Backpropagation: push the rollout value up to the root.
            node.back_propagate(result)

        # Default to a random action, then override with the most valuable child.
        action = random.choice(actions)
        if root.children:
            best_child = max(root.children, key=lambda child: child.wins / child.visits)
            action = best_child.action
        return action