from template import Agent
from Sequence.sequence_model import SequenceGameRule as GameRule
import copy
import time
import random
from collections import defaultdict

# Per-move planning budget in seconds; SelectAction stops expanding actions once exceeded.
THINKTIME = 0.9
# The game is configured for two players (this agent and one opponent).
NUM_PLAYERS = 2

class MyGameRule(GameRule):
    """GameRule variant whose generateSuccessor also computes a shaped heuristic
    reward (centre-position bonus, sequence bonuses, block-the-opponent bonuses)
    and accumulates it into the acting agent's score for search/evaluation."""

    # Positional bonus table (10x10): central squares are worth more, corners and
    # edges near corners carry their own weights. Class-level so it is built once,
    # not on every successor expansion.
    CENTER_BONUS = [
        [0, 4, 0, 0, 0, 0, 0, 0, 4, 0],
        [4, 4, 1, 2, 2, 2, 2, 1, 4, 4],
        [0, 1, 3, 3, 3, 3, 3, 3, 1, 0],
        [0, 2, 3, 5, 5, 5, 5, 3, 2, 0],
        [0, 2, 3, 5, 7, 7, 5, 3, 2, 0],
        [0, 2, 3, 5, 7, 7, 5, 3, 2, 0],
        [0, 2, 3, 5, 5, 5, 5, 3, 2, 0],
        [0, 1, 3, 3, 3, 3, 3, 3, 1, 0],
        [4, 4, 1, 2, 2, 2, 2, 1, 4, 4],
        [0, 4, 0, 0, 0, 0, 0, 0, 4, 0],
    ]

    def generateSuccessor(self, state, action, agent_id):
        """Return a deep-copied successor state after `agent_id` performs `action`.

        Side effects on the copy: hand/draft bookkeeping, board chip updates,
        and a shaped reward appended to the agent's trace and added to its score.

        action: dict with keys 'type' ('trade'|'place'|'remove'), 'play_card',
                'draft_card', and (for place/remove) 'coords' = (row, col).
        """
        new_state = copy.deepcopy(state)
        new_state.board.new_seq = False

        plr_state = new_state.agents[agent_id]
        plr_state.last_action = action
        reward = 0
        card = action['play_card']
        draft = action['draft_card']

        # Card bookkeeping: discard the played card, take the drafted one.
        if card:
            if card in plr_state.hand:
                plr_state.hand.remove(card)
            plr_state.discard = card
            new_state.board.draft.remove(draft)
            if draft not in plr_state.hand:
                plr_state.hand.append(draft)

        # A trade ends the turn immediately with zero shaped reward.
        if action['type'] == 'trade':
            plr_state.trade = True
            plr_state.agent_trace.action_reward.append((action, reward))
            return new_state

        r, c = action['coords']
        if action['type'] == 'place':
            new_state.board.chips[r][c] = plr_state.colour
            new_state.board.empty_coords.remove(action['coords'])
            new_state.board.plr_coords[plr_state.colour].append(action['coords'])
        elif action['type'] == 'remove':
            new_state.board.chips[r][c] = '_'
            new_state.board.empty_coords.append(action['coords'])
            new_state.board.plr_coords[plr_state.opp_colour].remove(action['coords'])
            reward += 5  # flat bonus for any removal
            opp_colour = plr_state.opp_colour
            # NOTE: (r, c) has already been cleared to '_' on this board; the
            # helper compensates by starting its count at 1 for the removed cell,
            # so it measures the enemy run the chip belonged to before removal.
            chips = new_state.board.chips
            dirs = [(1, 0), (0, 1), (1, 1), (1, -1)]

            def enemy_run_length(x, y):
                """Length (>= 2) of the first enemy run through (x, y), or 0.

                NOTE(review): this returns the FIRST direction that has a run of
                2+, not the longest over all four directions — confirm intended.
                """
                for dx, dy in dirs:
                    count = 1  # the removed chip itself
                    for sx, sy in ((dx, dy), (-dx, -dy)):
                        # Extend up to 4 cells each way along this axis.
                        for i in range(1, 5):
                            nx, ny = x + sx * i, y + sy * i
                            if 0 <= nx < 10 and 0 <= ny < 10 and chips[nx][ny] == opp_colour:
                                count += 1
                            else:
                                break
                    if count >= 2:
                        return count
                return 0

            run = enemy_run_length(r, c)
            if run >= 4:
                reward += 10  # blocked an imminent winning sequence
            elif run == 3:
                reward += 5   # blocked a potential 4-in-a-row
            # run == 2 earns nothing extra beyond the flat removal bonus.

            reward += self.CENTER_BONUS[r][c]

        if action['type'] == 'place':
            seq, seq_type = self.checkSeq(new_state.board.chips, plr_state, (r, c))
            reward += self.CENTER_BONUS[r][c]
            if seq:
                new_state.board.new_seq = seq_type
                base_reward = 10 * seq['num_seq']
                if seq_type == 1:
                    bonus = 10  # plain sequence
                elif seq_type == 2:
                    bonus = 20  # sequence using a corner / special cell
                elif seq_type == 3:
                    bonus = 30  # multiple sequences completed at once
                else:
                    bonus = 0
                reward += base_reward + bonus

                # Mark each completed sequence cell with the sequence colour.
                for sequence in seq['coords']:
                    for sr, sc in sequence:  # do not shadow the action's (r, c)
                        if new_state.board.chips[sr][sc] != '#':
                            new_state.board.chips[sr][sc] = plr_state.seq_colour
                            try:
                                # NOTE(review): removes the *placed* coordinate
                                # (not (sr, sc)) on every iteration; only the
                                # first removal can succeed. Preserved as-is —
                                # confirm intent against the framework.
                                new_state.board.plr_coords[plr_state.colour].remove(action['coords'])
                            except ValueError:
                                pass  # already removed on an earlier cell
                plr_state.completed_seqs += seq['num_seq']
                plr_state.seq_orientations.extend(seq['orientation'])

        plr_state.trade = False
        plr_state.agent_trace.action_reward.append((action, reward))
        plr_state.score += reward
        return new_state


class myAgent(Agent):
    """Depth-limited greedy lookahead agent.

    Values each action as (immediate score delta) + discount * (recursive value
    of the successor), maximising only over this agent's own moves (the
    opponent's replies are not modelled).
    """

    def __init__(self, _id):
        super().__init__(_id)
        self.id = _id
        self.game_rule = MyGameRule(NUM_PLAYERS)
        self.discount = 0.9   # geometric discount on future value
        self.max_depth = 2    # total lookahead depth including the root action

    def evaluate(self, state, depth, deadline=None):
        """Estimate the value of `state` with `depth` further self-moves.

        deadline: optional absolute time (time.time() units). When given, the
        action scan stops once it passes, so deep recursion cannot blow past
        the per-move THINKTIME budget. Omitting it preserves the original
        unbounded behaviour.

        NOTE(review): at depth 0 this returns the *cumulative* score while
        deeper levels add score *deltas*, so absolute score is mixed into the
        backup — kept as-is since changing it changes play behaviour.
        """
        current_score = state.agents[self.id].score
        if depth == 0:
            return current_score

        actions = self.game_rule.getLegalActions(state, self.id)
        if not actions:
            return current_score

        best_value = float('-inf')
        for action in actions:
            if deadline is not None and time.time() > deadline:
                break
            next_state = self.game_rule.generateSuccessor(state, action, self.id)
            immediate_reward = next_state.agents[self.id].score - current_score
            future_value = self.evaluate(next_state, depth - 1, deadline)
            best_value = max(best_value, immediate_reward + self.discount * future_value)

        # If the deadline expired before any action was scored, fall back to
        # the current score instead of returning -inf.
        return current_score if best_value == float('-inf') else best_value

    def SelectAction(self, actions, game_state):
        """Pick the legal action with the highest lookahead value within THINKTIME."""
        start_time = time.time()
        deadline = start_time + THINKTIME
        best_action = random.choice(actions)  # safe fallback if time runs out
        best_value = float('-inf')
        base_score = game_state.agents[self.id].score

        for action in actions:
            if time.time() - start_time > THINKTIME:
                print("Time limit reached.")
                break
            next_state = self.game_rule.generateSuccessor(game_state, action, self.id)
            immediate_reward = next_state.agents[self.id].score - base_score
            # Thread the deadline down so the recursion also respects the budget.
            future_value = self.evaluate(next_state, self.max_depth - 1, deadline)
            total_value = immediate_reward + self.discount * future_value

            if total_value > best_value:
                best_value = total_value
                best_action = action

        return best_action
