from template import Agent
from collections import defaultdict
from Sequence.sequence_model import SequenceGameRule as GameRule
import random
import time
import math
import copy

# Per-move search budget in seconds for the MCTS loop in myAgent.SelectAction.
THINKTIME = 0.9
# Player count used to instantiate MyGameRule.
NUM_PLAYERS = 2
# Board cell markers: corner joker and unoccupied square.
JOKER = '#'
EMPTY = '_'
# Sequence-type codes (not referenced in this chunk; presumably mirror the
# values used by Sequence.sequence_model's checkSeq — TODO confirm).
TRADSEQ = 1
HOTBSEQ = 2
MULTSEQ = 3

class MyGameRule(GameRule):
    """GameRule variant whose generateSuccessor works on a deep copy of the
    state, so MCTS can simulate moves without mutating the live game."""
    def generateSuccessor(self, state, action, agent_id):
        """Return a new state with `action` applied for agent `agent_id`.

        Handles card bookkeeping, then 'trade'/'place'/'remove' actions, then
        (for placements) sequence detection and scoring. The reward equals the
        number of sequences completed and is added to the agent's score.
        """
        new_state = copy.deepcopy(state)
        new_state.board.new_seq = False  # reset per-move "new sequence" flag

        plr_state = new_state.agents[agent_id]
        plr_state.last_action = action
        reward = 0

        card = action['play_card']
        draft = action['draft_card']

        # Card bookkeeping: discard the played card, pick up the draft card.
        if card:
            if card in plr_state.hand:
                plr_state.hand.remove(card)
            plr_state.discard = card
            new_state.board.draft.remove(draft)
            # NOTE(review): this guard skips the pickup when an identical card
            # is already in hand, so the hand shrinks by one — confirm this is
            # intended (hands can normally hold duplicate cards).
            if draft not in plr_state.hand:
                plr_state.hand.append(draft)

        if action['type'] == 'trade':
            plr_state.trade = True
            plr_state.agent_trace.action_reward.append((action, reward))
            return new_state

        r, c = action['coords']
        if action['type'] == 'place':
            # Claim the square for this player's colour.
            new_state.board.chips[r][c] = plr_state.colour
            new_state.board.empty_coords.remove(action['coords'])
            new_state.board.plr_coords[plr_state.colour].append(action['coords'])
        elif action['type'] == 'remove':
            # Remove an opponent chip: the square becomes empty again.
            new_state.board.chips[r][c] = EMPTY
            new_state.board.empty_coords.append(action['coords'])
            new_state.board.plr_coords[plr_state.opp_colour].remove(action['coords'])

        if action['type'] == 'place':
            # checkSeq (inherited) reports any sequences completed by this chip.
            seq, seq_type = self.checkSeq(new_state.board.chips, plr_state, (r, c))
            if seq:
                reward += seq['num_seq']
                new_state.board.new_seq = seq_type
                for sequence in seq['coords']:
                    # r, c are re-bound here, shadowing the placement coords;
                    # harmless since they are not read again afterwards.
                    for r, c in sequence:
                        if new_state.board.chips[r][c] != JOKER:
                            # Mark sequence chips; joker squares keep their marker.
                            new_state.board.chips[r][c] = plr_state.seq_colour
                plr_state.completed_seqs += seq['num_seq']
                plr_state.seq_orientations.extend(seq['orientation'])

        plr_state.trade = False
        plr_state.agent_trace.action_reward.append((action, reward))
        plr_state.score += reward
        return new_state

class QTable:
    """Tabular Q-value store keyed by (board string, action type, coords).

    Values start at `default_q_value` and are nudged by `alpha * delta` on
    each update.  Fix over the original: `get_q_value` no longer uses a
    defaultdict subscript, which silently inserted an entry for every lookup
    and grew the table without bound from pure reads.
    """

    def __init__(self, alpha=0.1, default_q_value=0.0):
        # Plain dict: reads of unseen keys must not create entries.
        self.qtable = {}
        self.alpha = alpha
        self.default_q_value = default_q_value

    def _key(self, state, action):
        # The nested chips list is unhashable, so the board is stringified.
        return (str(state.board.chips), action['type'], action.get('coords'))

    def update(self, state, action, delta):
        """Move the (state, action) Q-value by alpha * delta."""
        key = self._key(state, action)
        self.qtable[key] = self.qtable.get(key, self.default_q_value) + self.alpha * delta

    def get_q_value(self, state, action):
        """Return the stored Q-value, or the default for unseen pairs
        (without inserting a new entry)."""
        return self.qtable.get(self._key(state, action), self.default_q_value)

class MCTSNode:
    """One node of the MCTS tree: a game state plus search statistics.

    Selection uses UCT with a small heuristic bias; expansion and rollouts
    pick moves greedily with the same heuristic.
    """

    def __init__(self, agent_id, state, q_table, heuristic_agent, parent=None, action=None):
        self.id = agent_id
        self.state = state
        self.q_table = q_table
        self.heuristic = heuristic_agent
        self.parent = parent
        self.action = action  # move that led from parent to this node
        self.children = []
        self.visits = 0
        self.wins = 0
        self.game_rule = MyGameRule(NUM_PLAYERS)
        # Legal moves not yet materialised as child nodes.
        self.untried_actions = self.game_rule.getLegalActions(state, self.id)

    def uct_select(self, c=1.414):
        """Return a child maximising UCT score + 0.1 * heuristic bias,
        breaking ties uniformly at random."""
        def _score(child):
            if child.visits > 0:
                mean = child.wins / child.visits
                bonus = math.sqrt(math.log(self.visits) / child.visits)
                base = mean + c * bonus
            else:
                base = float('inf')  # unvisited children are tried first
            return base + 0.1 * self.heuristic.evaluate_action(child.state, child.action)

        top = -float('inf')
        ties = []
        for child in self.children:
            s = _score(child)
            if s > top:
                top, ties = s, [child]
            elif s == top:
                ties.append(child)
        return random.choice(ties)

    def expand(self):
        """Turn the heuristically best untried action into a new child node."""
        chosen = max(self.untried_actions,
                     key=lambda act: self.heuristic.evaluate_action(self.state, act))
        self.untried_actions.remove(chosen)
        successor = self.game_rule.generateSuccessor(self.state, chosen, self.id)
        child = MCTSNode(self.id, successor, self.q_table, self.heuristic, self, chosen)
        self.children.append(child)
        return child

    def backpropagate(self, result):
        """Add `result` to every node on the path back to the root."""
        node = self
        while node is not None:
            node.visits += 1
            node.wins += result
            node = node.parent

    def rollout(self):
        """Greedy 3-step simulation from this node; returns a heuristic score.

        NOTE(review): only this agent's moves are simulated — the opponent
        never acts during the rollout; confirm that is intentional.
        """
        sim = copy.deepcopy(self.state)
        for _ in range(3):
            legal = self.game_rule.getLegalActions(sim, self.id)
            if not legal:
                break
            move = max(legal, key=lambda a: self.heuristic.evaluate_action(sim, a))
            sim = self.game_rule.generateSuccessor(sim, move, self.id)
        return self.heuristic.calculate_rollout_score(sim)

class HeuristicAgent:
    """Hand-crafted board-evaluation heuristics for the Sequence game.

    All helpers unpack coordinates as (x, y) and index the board chips[y][x].
    NOTE(review): MyGameRule.generateSuccessor unpacks (r, c) = coords and
    indexes chips[r][c] — the opposite order. Confirm the intended coordinate
    convention; a transposed lookup would evaluate the wrong cells.
    """

    # Marker for an unoccupied board square (mirrors module-level EMPTY).
    EMPTY_CELL = '_'

    def __init__(self, _id):
        self.id = _id
        # Static positional weight table: central squares score highest.
        self.center_bonus = [
            [0,4,0,0,0,0,0,0,4,0],
            [4,4,1,2,2,2,2,1,4,4],
            [0,1,3,3,3,3,3,3,1,0],
            [0,2,3,5,5,5,5,3,2,0],
            [0,2,3,5,7,7,5,3,2,0],
            [0,2,3,5,7,7,5,3,2,0],
            [0,2,3,5,5,5,5,3,2,0],
            [0,1,3,3,3,3,3,3,1,0],
            [4,4,1,2,2,2,2,1,4,4],
            [0,4,0,0,0,0,0,0,4,0]
        ]

    def evaluate_action(self, state, action):
        """Score a candidate action (higher is better).

        Returns 0 for actions without usable in-bounds coordinates.
        'place' combines positional value, immediate-threat extension and
        pattern potential; 'remove' scores the danger of the opponent chip.
        """
        if not action or action.get('coords') is None:
            return 0

        x, y = action['coords']
        if not (0 <= x <= 9 and 0 <= y <= 9):
            return 0

        score = 0
        if action['type'] == 'place':
            score += self.center_bonus[y][x] * 4
            score += self.evaluate_immediate_threat(state.board.chips, x, y, state.agents[self.id].colour) * 2
            score += self.evaluate_potential(state.board.chips, x, y, state.agents[self.id].colour) * 60
        elif action['type'] == 'remove':
            score += self.calculate_danger(state.board.chips, x, y, state.agents[self.id].opp_colour) * 120
        return score

    def calculate_rollout_score(self, state):
        """Terminal evaluation for rollouts: game score (heavily weighted)
        plus positional value of all of this agent's chips."""
        return state.agents[self.id].score * 100 + sum(
            self.center_bonus[y][x]
            for (x, y) in state.board.plr_coords[state.agents[self.id].colour]
        )

    def advanced_adjacent_score(self, chips, x, y, team):
        """Score runs of friendly chips radiating from (x, y).

        Returns (score, potential): score grows exponentially with run length;
        potential counts directions with at least one adjacent friendly chip.
        (Not referenced by the visible code in this file chunk.)
        """
        score = 0
        potential = 0
        directions = [(-1,0),(1,0),(0,-1),(0,1),(-1,-1),(1,1),(-1,1),(1,-1)]

        for dx, dy in directions:
            consecutive = 1
            for i in range(1, 4):
                nx, ny = x + dx*i, y + dy*i
                if 0 <= nx < 10 and 0 <= ny < 10:
                    if chips[ny][nx] == team:
                        consecutive += 1
                        score += 15 * (2 ** consecutive)  # exponential bonus for longer runs
                    else:
                        break
            if consecutive >= 2:
                potential += 1
        return score, potential

    def evaluate_potential(self, chips, x, y, team):
        """Count open sequence-building patterns on the 8 lines through (x, y).

        Every 4-cell window is scored: 3 chips + 1 empty → 2, 2 + 2 → 1,
        1 + 3 → 0.5. (Opposite directions scan the same line twice, so each
        pattern is counted twice — preserved from the original weighting.)
        """
        potential = 0
        directions = [(dx, dy) for dx in (-1,0,1) for dy in (-1,0,1) if (dx, dy) != (0,0)]

        for dx, dy in directions:
            line = []
            for i in range(-3, 4):
                nx, ny = x + dx*i, y + dy*i
                if 0 <= nx < 10 and 0 <= ny < 10:
                    line.append(chips[ny][nx])
                else:
                    line.append(None)  # off-board: blocks the window

            # Detect 3+1 and 2+2 (and 1+3) patterns in each 4-cell window.
            for i in range(4):
                segment = line[i:i+4]
                team_count = segment.count(team)
                # Bug fix: free squares on the board are EMPTY ('_'), not None.
                # The original counted off-board cells as "empty" and treated
                # genuinely empty squares as blockers, inverting the heuristic.
                empty_count = segment.count(self.EMPTY_CELL)

                if team_count == 3 and empty_count == 1:
                    potential += 2
                elif team_count == 2 and empty_count == 2:
                    potential += 1
                elif team_count == 1 and empty_count == 3:
                    potential += 0.5

        return potential

    def calculate_danger(self, chips, x, y, opp_team):
        """Score how dangerous the opponent chip at (x, y) is, based on the
        longest opponent runs starting there (2 → 30, 3 → 100, 4 → 300 per
        direction). Returns 0 if the square is not held by `opp_team`."""
        if chips[y][x] != opp_team:
            return 0

        danger = 0
        directions = [(-1,0),(1,0),(0,-1),(0,1),(-1,-1),(1,1),(-1,1),(1,-1)]

        for dx, dy in directions:
            consecutive = 0
            for i in range(4):
                nx, ny = x + dx*i, y + dy*i
                if 0 <= nx < 10 and 0 <= ny < 10:
                    if chips[ny][nx] == opp_team:
                        consecutive += 1
                    else:
                        break
            danger += {2: 30, 3: 100, 4: 300}.get(consecutive, 0)

        return danger

    def near_victory(self, chips, team):
        """Rough endgame check via total chip count.
        NOTE(review): threshold of 15 chips is a proxy — the real victory
        condition is completed sequences; tune against the actual game."""
        team_chips = sum(row.count(team) for row in chips)
        return team_chips >= 15

    def early_game(self, chips):
        """Opening-phase check: fewer than 20 chips on the board in total.
        NOTE(review): assumes colours are 'r' and 'b' — confirm against the
        game model's colour encoding."""
        total_chips = sum(row.count('r') + row.count('b') for row in chips)
        return total_chips < 20

    def evaluate_immediate_threat(self, chips, x, y, team):
        """Score friendly runs in a 7-cell window along each of 8 directions
        through (x, y); each run of length >= 2 adds 20 * 2**length per cell."""
        threat = 0
        directions = [(-1,0),(1,0),(0,-1),(0,1),(-1,-1),(1,1),(-1,1),(1,-1)]

        for dx, dy in directions:
            seq_length = 0
            for i in range(-3, 4):  # scan a 7-cell window
                nx, ny = x + dx*i, y + dy*i
                if 0 <= nx < 10 and 0 <= ny < 10:
                    if chips[ny][nx] == team:
                        seq_length += 1
                        if seq_length >= 2:
                            threat += 20 * (2 ** seq_length)
                    else:
                        seq_length = 0
        return threat

class myAgent(Agent):
    """Sequence agent that picks moves with heuristic-guided MCTS."""

    def __init__(self, _id):
        super().__init__(_id)
        self.id = _id
        self.game_rule = MyGameRule(NUM_PLAYERS)
        self.q_table = QTable()
        self.heuristic = HeuristicAgent(_id)

    def SelectAction(self, actions, game_state):
        """Run MCTS from `game_state` for THINKTIME seconds and return the
        best child's action (falling back to a random legal action)."""
        root = MCTSNode(self.id, game_state, self.q_table, self.heuristic)
        deadline = time.time() + THINKTIME

        while time.time() < deadline:
            # Selection: descend through fully expanded internal nodes.
            cursor = root
            while cursor.children and not cursor.untried_actions:
                cursor = cursor.uct_select()

            # Expansion: grow one child if any move is still untried.
            if cursor.untried_actions:
                cursor = cursor.expand()

            # Simulation + backpropagation.
            cursor.backpropagate(cursor.rollout())

        if not root.children:
            return random.choice(actions)

        # Rank children by visit count plus a heuristic tiebreaker.
        best = max(root.children,
                   key=lambda ch: ch.visits + self.heuristic.evaluate_action(ch.state, ch.action))
        return best.action