from template import Agent
import random
import copy

BOARD_SIZE = 10     # Sequence board is 10x10
DICOUNT = 0.9       # discount factor for value iteration
                    # NOTE(review): "DICOUNT" is a typo for "DISCOUNT"; it is
                    # referenced in value_iteration, so renaming requires
                    # updating that method too.
ITERATIONS = 5    # number of value-iteration sweeps per move
REWARD_PLACE = 1      # base reward for placing on an empty square
REWARD_BLOCK = 3      # weight for removing an opponent chip
REWARD_SEQUENCE = 10  # bonus weight for completing a sequence

class myAgent(Agent):
    """Sequence agent that scores board squares with a small value iteration.

    Each turn the agent re-estimates a per-square value table from the
    current board (reward shaping around friendly/opponent chips, plus
    discounted averaging over the 8-neighbourhood), then greedily picks
    the highest-scoring legal action.
    """

    # 8-connected neighbourhood offsets, shared by the helpers below.
    _DIRECTIONS = ((-1, 0), (1, 0), (0, -1), (0, 1),
                   (-1, -1), (1, 1), (-1, 1), (1, -1))

    def __init__(self, _id):
        super().__init__(_id)
        # Per-square value estimates.  Persists between moves, so the
        # previous table seeds the next call to value_iteration.
        self.value_table = [[0.0 for _ in range(BOARD_SIZE)] for _ in range(BOARD_SIZE)]

    def SelectAction(self, actions, game_state):
        """Return the highest-scoring action from ``actions``.

        Args:
            actions: legal actions; each usable one is a dict with a
                ``'coords'`` (x, y) pair and a ``'type'`` of ``'place'``
                or ``'remove'``.
            game_state: current game state; provides the agent colours
                and the board chip grid.

        Returns:
            The best-scoring action, or a random legal action if none
            could be scored.
        """
        team = game_state.agents[self.id].colour
        opp_team = 'r' if team == 'b' else 'b'
        chips = game_state.board.chips

        # Re-estimate square values for the current board.
        self.value_iteration(chips, team, opp_team)

        best_score = -float('inf')
        best_action = None

        for action in actions:
            # Skip malformed actions and actions without a target square.
            if not action or 'coords' not in action or action['coords'] is None:
                continue

            x, y = action['coords']
            if not (0 <= x < BOARD_SIZE and 0 <= y < BOARD_SIZE):
                continue

            score = 0
            if action['type'] == 'place':
                score = self.value_table[y][x]  # learned square value
                # Completing a sequence dominates every other consideration.
                if action.get('new_seq'):
                    score += REWARD_SEQUENCE * 10
            elif action['type'] == 'remove':
                score = self.estimate_block_value(chips, x, y, opp_team) * REWARD_BLOCK

            if score > best_score:
                best_score = score
                best_action = action

        # Fall back to a random legal action if nothing was scoreable.
        return best_action if best_action else random.choice(actions)

    def value_iteration(self, chips, team, opp_team):
        """Run ITERATIONS sweeps of value iteration over empty squares.

        Occupied squares keep value 0; each empty square gets its local
        reward plus the discounted mean value of its 8-neighbourhood.
        The result replaces ``self.value_table``.
        """
        # Seed from the previous move's table so earlier estimates
        # influence the first sweep.
        V = copy.deepcopy(self.value_table)

        for _ in range(ITERATIONS):
            new_V = [[0.0 for _ in range(BOARD_SIZE)] for _ in range(BOARD_SIZE)]
            for y in range(BOARD_SIZE):
                for x in range(BOARD_SIZE):
                    if chips[y][x] != ' ':
                        continue  # occupied square, skip

                    reward = self.estimate_reward(chips, x, y, team, opp_team)
                    neighbors = self.get_neighbors_value(V, x, y)
                    new_V[y][x] = reward + DICOUNT * neighbors
            V = new_V

        self.value_table = V

    def estimate_reward(self, chips, x, y, team, opp_team):
        """Local reward for placing at (x, y): base placement value,
        boosted by adjacent friendly chips, penalised by adjacent
        opponent chips."""
        reward = REWARD_PLACE
        reward += self.count_adjacent(chips, x, y, team) * 0.5
        reward -= self.count_adjacent(chips, x, y, opp_team) * 0.3
        return reward

    def get_neighbors_value(self, V, x, y):
        """Mean value of the in-bounds 8-neighbours of (x, y) in table V."""
        total = 0
        count = 0
        for dx, dy in self._DIRECTIONS:
            nx, ny = x + dx, y + dy
            if 0 <= nx < BOARD_SIZE and 0 <= ny < BOARD_SIZE:
                total += V[ny][nx]
                count += 1
        return total / count if count else 0

    def count_adjacent(self, chips, x, y, team):
        """Number of in-bounds 8-neighbours of (x, y) holding a `team` chip."""
        count = 0
        for dx, dy in self._DIRECTIONS:
            nx, ny = x + dx, y + dy
            if 0 <= nx < BOARD_SIZE and 0 <= ny < BOARD_SIZE:
                if chips[ny][nx] == team:
                    count += 1
        return count

    def estimate_block_value(self, chips, x, y, opp_team):
        """Value of removing an opponent chip at (x, y): 0 if the square
        does not hold an opponent chip, otherwise the number of adjacent
        opponent chips (denser clusters are more worth breaking up)."""
        if chips[y][x] != opp_team:
            return 0
        return self.count_adjacent(chips, x, y, opp_team)

# python general_game_runner.py -g Sequence -a agents.t_069.greedy,agents.t_069.value