import time
import random
import math
import numpy as np
import torch
import torch.nn as nn
from collections import defaultdict

# Global configuration
BOARD_SIZE = 15  # board is BOARD_SIZE x BOARD_SIZE
EMPTY = -1       # cell value: no stone (boards are numpy arrays)
BLACK = 1        # cell value: black stone
WHITE = 0        # cell value: white stone
TIME_LIMIT = 5   # seconds allotted per move
C_PUCT = 1.5     # MCTS exploration constant
MCTS_SIMULATIONS = 200  # nominal simulation budget (search below is actually time-bounded)
NET_BATCH_SIZE = 8      # batch size for network inference during rollouts
DIRECTIONS = [(1,0), (0,1), (1,1), (1,-1)]  # the four line orientations: | , - , \ , /

class BoardUtils:
    """Static helpers shared by the search components: win detection and
    candidate-move generation over numpy boards."""

    @staticmethod
    def is_win(board, last_move=None):
        """Return True if the board contains five (or more) in a row.

        When *last_move* is given, only the four lines through that point are
        examined; otherwise the whole board is scanned as a fallback.
        """
        if last_move:
            x, y = last_move
            color = board[x][y]
            for dx, dy in DIRECTIONS:
                run = 1
                # Extend the run outwards in both senses of the direction.
                for sign in (-1, 1):
                    nx, ny = x + dx * sign, y + dy * sign
                    while 0 <= nx < BOARD_SIZE and 0 <= ny < BOARD_SIZE and board[nx][ny] == color:
                        run += 1
                        nx += dx * sign
                        ny += dy * sign
                if run >= 5:
                    return True
            return False

        # Fallback full scan: a forward run of 5 from every occupied cell.
        for x in range(BOARD_SIZE):
            for y in range(BOARD_SIZE):
                stone = board[x][y]
                if stone == EMPTY:
                    continue
                for dx, dy in DIRECTIONS:
                    run = 1
                    for i in range(1, 5):
                        nx, ny = x + dx * i, y + dy * i
                        if 0 <= nx < BOARD_SIZE and 0 <= ny < BOARD_SIZE and board[nx][ny] == stone:
                            run += 1
                        else:
                            break
                    if run >= 5:
                        return True
        return False

    @staticmethod
    def generate_moves(board, color, mode='main'):
        """Candidate moves for *color*.

        mode='main': up to 20 empty cells, ordered by orthogonal-neighbour
        count (most neighbours first) with a distance-to-centre tiebreak.
        Any other mode ('rollout'): up to 12 empty cells orthogonally adjacent
        to a stone, falling back to a 5x5 block around the centre on an
        empty board.
        """
        center = BOARD_SIZE // 2
        orth = [(-1, 0), (1, 0), (0, -1), (0, 1)]

        if mode == 'main':
            scored = []
            for x in range(BOARD_SIZE):
                for y in range(BOARD_SIZE):
                    if board[x][y] != EMPTY:
                        continue
                    neighbours = 0
                    for dx, dy in orth:
                        nx, ny = x + dx, y + dy
                        if 0 <= nx < BOARD_SIZE and 0 <= ny < BOARD_SIZE and board[nx][ny] != EMPTY:
                            neighbours += 1
                    scored.append((x, y, -neighbours))
            scored.sort(key=lambda t: (t[2], -abs(t[0] - center) - abs(t[1] - center)))
            return [(x, y) for x, y, _ in scored[:20]]

        # Rollout mode: cells touching an existing stone, in scan order.
        candidates = []
        for x in range((BOARD_SIZE)):
            for y in range(BOARD_SIZE):
                if board[x][y] != EMPTY:
                    continue
                if any(0 <= x + dx < BOARD_SIZE and 0 <= y + dy < BOARD_SIZE
                       and board[x + dx][y + dy] != EMPTY
                       for dx, dy in orth):
                    candidates.append((x, y))
        if not candidates:
            candidates = [(i, j)
                          for i in range(center - 2, center + 3)
                          for j in range(center - 2, center + 3)
                          if board[i][j] == EMPTY]
        return candidates[:12]

class PolicyValueNet(nn.Module):
    """Two-headed policy/value network over 3-plane board encodings.

    NOTE(review): layer order inside each nn.Sequential is kept identical to
    the original so that checkpoint state_dict keys (conv.0, policy_head.4,
    ...) remain loadable.
    """

    def __init__(self):
        super().__init__()
        channels = 128
        cells = BOARD_SIZE * BOARD_SIZE

        # Shared convolutional trunk.
        self.conv = nn.Sequential(
            nn.Conv2d(3, channels, 3, padding=1),
            nn.BatchNorm2d(channels),
            nn.ReLU(),
            nn.Conv2d(channels, channels, 3, padding=1),
            nn.BatchNorm2d(channels),
            nn.ReLU(),
        )
        # Policy head: a probability distribution over every board cell.
        self.policy_head = nn.Sequential(
            nn.Conv2d(channels, 4, 1),
            nn.BatchNorm2d(4),
            nn.ReLU(),
            nn.Flatten(),
            nn.Linear(4 * cells, cells),
            nn.Softmax(dim=1),
        )
        # Value head: scalar position score squashed into [-1, 1].
        self.value_head = nn.Sequential(
            nn.Conv2d(channels, 2, 1),
            nn.BatchNorm2d(2),
            nn.ReLU(),
            nn.Flatten(),
            nn.Linear(2 * cells, 256),
            nn.ReLU(),
            nn.Linear(256, 1),
            nn.Tanh(),
        )

    def forward(self, x):
        """Return (policy, value) for a batch of encoded boards."""
        features = self.conv(x)
        return self.policy_head(features), self.value_head(features)

class MCTSNode:
    """A single node of the Monte-Carlo search tree."""

    def __init__(self, parent=None, move=None, color=BLACK):
        self.parent = parent    # parent node (None at the root)
        self.move = move        # (x, y) that produced this node, None at the root
        self.color = color      # colour of the stone placed by `move`
        self.children = []      # expanded child nodes
        self.visits = 0         # visit count
        self.q_value = 0.0      # accumulated playout value
        self.p_value = 0.0      # prior probability from the network
        self.board = None       # optional board snapshot

class GomokuAI:
    """Gomoku engine combining a policy-value network with MCTS and an
    alpha-beta fallback search.

    Boards are numpy arrays of shape (BOARD_SIZE, BOARD_SIZE) holding
    EMPTY / BLACK / WHITE cell values.
    """

    def __init__(self, model_path=None):
        self.net = PolicyValueNet()
        if model_path:
            self.net.load_state_dict(torch.load(model_path))
        self.trans_table = defaultdict(dict)  # transposition table (reserved, unused so far)
        self.history_heuristic = np.zeros((BOARD_SIZE, BOARD_SIZE))  # beta-cutoff history

    def get_move(self, board, is_black):
        """Main entry point: return the chosen (x, y) for the side to move."""
        center = BOARD_SIZE // 2
        if np.sum(board != EMPTY) < 3:  # trivial opening book: take/flank the centre
            return (center, center) if board[center][center] == EMPTY else (center, center + 1)
        start_time = time.time()
        return self.mcts_search(board, is_black, start_time)

    def mcts_search(self, board, is_black, start_time):
        """Time-limited MCTS; returns the move of the most-visited root child."""
        to_move = BLACK if is_black else WHITE
        # The root describes the position *before* the side to move plays, so
        # it carries the opponent's colour; children then carry `to_move`'s
        # colour.  (The original rooted the tree at `to_move`, which made the
        # first tree ply place the wrong side's stones.)
        root = MCTSNode(color=BLACK + WHITE - to_move)
        while time.time() - start_time < TIME_LIMIT * 0.8:
            self._tree_policy(root, np.copy(board))
        if not root.children:
            # Defensive fallback: expansion never happened within the budget.
            moves = BoardUtils.generate_moves(board, to_move, 'main')
            return moves[0] if moves else (BOARD_SIZE // 2, BOARD_SIZE // 2)
        return max(root.children, key=lambda n: n.visits).move

    def _tree_policy(self, node, board):
        """One MCTS iteration: select, expand, simulate, backpropagate."""
        while node.children:
            node = self._select_child(node)
            board[node.move] = node.color
        # Expand only nodes that were visited before (standard leaf policy).
        # Passing node.move lets is_win use its fast local check; it is None
        # at the root, which triggers the full scan.
        if not BoardUtils.is_win(board, node.move) and node.visits > 0:
            self._expand_node(node, board)
            if node.children:
                node = random.choice(node.children)
                board[node.move] = node.color
        # The playout starts with the opponent of the colour just placed.
        value = self._simulate(board, BLACK + WHITE - node.color)
        self._backpropagate(node, value)

    def _backpropagate(self, node, value):
        """Propagate a playout result (+1 BLACK / -1 WHITE / 0 draw) to the root.

        This method was called but never defined in the original
        (AttributeError at runtime).  It must walk the parent chain: updating
        only the selection path would leave root.visits at 0 forever, so the
        root would never expand.  Values are stored from each node's own
        colour's perspective so _select_child can maximise uniformly.
        """
        while node is not None:
            node.visits += 1
            node.q_value += value if node.color == BLACK else -value
            node = node.parent

    def _select_child(self, node):
        """PUCT-style child selection.

        The original computed math.log(sum of child visits), which raised
        ValueError (log(0)) on any freshly expanded node.  The parent's own
        visit count (always >= 1 during selection) drives exploration instead.
        """
        sqrt_parent = math.sqrt(max(1, node.visits))

        def puct(c):
            exploit = c.q_value / c.visits if c.visits else 0.0
            explore = C_PUCT * c.p_value * sqrt_parent / (1 + c.visits)
            return exploit + explore

        return max(node.children, key=puct)

    def _expand_node(self, node, board):
        """Create children for every candidate reply, with network priors."""
        # The mover at this node is the opponent of the colour already placed.
        # The original flipped with `3 - color`, which is invalid for
        # BLACK=1 / WHITE=0 (it yields 2 or 3).
        next_color = BLACK + WHITE - node.color
        state = self._board_to_tensor(board, next_color)
        with torch.no_grad():
            policy, _ = self.net(state)

        legal_moves = BoardUtils.generate_moves(board, next_color, 'rollout')
        if not legal_moves:
            return
        probs = policy[0].numpy().reshape(BOARD_SIZE, BOARD_SIZE)
        total = sum(probs[x][y] for x, y in legal_moves)

        for move in legal_moves:
            child = MCTSNode(parent=node, move=move, color=next_color)
            # Renormalise the prior over the legal moves; fall back to uniform
            # if the network assigns them (numerically) zero mass.
            child.p_value = (probs[move[0]][move[1]] / total if total > 0
                             else 1.0 / len(legal_moves))
            # NOTE: the original also stored a full board copy per child; it
            # was never read anywhere and is dropped to save memory.
            node.children.append(child)

    def _simulate(self, board, color):
        """Depth-limited playout guided by the policy network.

        Returns +1 if BLACK completes five, -1 for WHITE, 0 if neither does
        within 10 plies.
        """
        for _ in range(10):
            moves = BoardUtils.generate_moves(board, color, 'rollout')
            if not moves:
                return 0
            # Single forward pass: the original batched NET_BATCH_SIZE copies
            # of the identical state and averaged them — same result, 8x cost.
            state = self._board_to_tensor(board, color)
            with torch.no_grad():
                policy, _ = self.net(state)
            move_probs = policy[0].reshape(BOARD_SIZE, BOARD_SIZE)
            x, y = max(moves, key=lambda m: move_probs[m[0]][m[1]])
            board[x][y] = color
            if BoardUtils.is_win(board, (x, y)):
                return 1 if color == BLACK else -1
            color = BLACK + WHITE - color
        return 0

    def _board_to_tensor(self, board, color):
        """Encode the board as a (1, 3, H, W) float tensor from `color`'s view."""
        opponent = BLACK + WHITE - color
        planes = np.zeros((3, BOARD_SIZE, BOARD_SIZE), np.float32)
        planes[0] = (board == color)      # own stones
        planes[1] = (board == opponent)   # opponent stones
        # Checkerboard positional feature (was hard-coded to (15, 15)).
        planes[2] = np.indices((BOARD_SIZE, BOARD_SIZE)).sum(axis=0) % 2
        return torch.from_numpy(planes).unsqueeze(0)

    def alpha_beta_search(self, board, is_black, start_time):
        """Iterative-deepening alpha-beta search (depths 2..4)."""
        best_move = (-1, -1)
        best_score = -np.inf
        depth = 2
        while depth <= 4 and time.time() - start_time < TIME_LIMIT:
            score, move = self._ab_search(board, is_black, depth, start_time)
            # `move` can be None when an iteration times out immediately;
            # never let that overwrite a real move.
            if move is not None and score > best_score:
                best_score, best_move = score, move
            depth += 1
        return best_move

    def _ab_search(self, board, is_black, depth, start_time, alpha=-np.inf, beta=np.inf):
        """Negamax with alpha-beta pruning; returns (score, best_move)."""
        if depth == 0 or BoardUtils.is_win(board):
            return self._evaluate(board, is_black), None

        best_score = -np.inf
        best_move = None
        color = BLACK if is_black else WHITE

        for move in BoardUtils.generate_moves(board, color, 'main'):
            if time.time() > start_time + TIME_LIMIT:
                break
            board[move] = color
            score = -self._ab_search(board, not is_black, depth - 1, start_time, -beta, -alpha)[0]
            board[move] = EMPTY  # undo

            if score > best_score:
                best_score = score
                best_move = move
                alpha = max(alpha, score)
                if alpha >= beta:
                    # History heuristic: reward moves that cause beta cut-offs.
                    self.history_heuristic[move] += depth ** 2
                    break
        return best_score, best_move

    def _evaluate(self, board, is_black):
        """Static + Monte-Carlo evaluation from the side-to-move's perspective."""
        if BoardUtils.is_win(board):
            # A completed five means the *previous* mover won, so the side to
            # move has lost.  (The original returned +inf for black here,
            # inverting every won position inside the negamax.)
            return -np.inf

        # Static pattern scan: score every 5-cell window through each stone.
        score = 0
        for x in range(BOARD_SIZE):
            for y in range(BOARD_SIZE):
                if board[x][y] == EMPTY:
                    continue
                for dx, dy in DIRECTIONS:
                    line = [board[x + i * dx][y + i * dy]
                            if 0 <= x + i * dx < BOARD_SIZE and 0 <= y + i * dy < BOARD_SIZE
                            else None
                            for i in range(-4, 5)]
                    for i in range(5):
                        seg = line[i:i + 5]
                        cnt_b = seg.count(BLACK)
                        cnt_w = seg.count(WHITE)
                        if cnt_b == 5: score += 1e6
                        if cnt_w == 5: score -= 1e6
                        if cnt_b == 4: score += 1e4
                        if cnt_w == 4: score -= 1e4

        # Monte-Carlo refinement for quiet positions only.
        if abs(score) < 1e4:
            wins = sum(1 for _ in range(50)
                       if self._fast_simulate(board.copy(), is_black) > 0)
            score += (wins / 50 - 0.5) * 1e5
        return score if is_black else -score

    def _fast_simulate(self, board, is_black):
        """Cheap random playout; +1 BLACK win, -1 WHITE win, 0 otherwise."""
        color = BLACK if is_black else WHITE
        for _ in range(10):
            moves = BoardUtils.generate_moves(board, color, 'rollout')
            if not moves:
                return 0
            x, y = random.choice(moves)
            board[x, y] = color
            if BoardUtils.is_win(board, (x, y)):
                return 1 if color == BLACK else -1
            color = BLACK + WHITE - color
        return 0
class Game:
    """Five-in-a-row win detection over plain 2-D Python lists.

    Unlike BoardUtils-style helpers this class validates its input and infers
    the board size from the list itself; empty cells hold Game.EMPTY (0).
    """

    DIRECTIONS = [(1, 0), (0, 1), (1, 1), (1, -1)]
    EMPTY = 0

    @staticmethod
    def is_win(board, last_move=None):
        """Return True if the board contains five (or more) stones in a row.

        Raises ValueError unless *board* is a non-empty square 2-D list.
        When *last_move* is given, only lines through it are checked.
        """
        if not isinstance(board, list) or not all(isinstance(row, list) for row in board):
            raise ValueError("Board must be a 2D list")

        size = len(board)
        if size == 0 or any(len(row) != size for row in board):
            raise ValueError("Board must be a square matrix")

        if last_move:
            return Game.check_last_move(board, last_move, size)

        # Fallback: test every occupied cell.
        return any(
            board[x][y] != Game.EMPTY and Game.check_from_position(board, x, y, size)
            for x in range(size)
            for y in range(size)
        )

    @staticmethod
    def check_last_move(board, last_move, board_size):
        """Win test restricted to the lines through *last_move*."""
        x, y = last_move
        # Identical logic to check_from_position — delegate instead of duplicating.
        return Game.check_from_position(board, x, y, board_size)

    @staticmethod
    def check_from_position(board, x, y, board_size):
        """True if a run of >= 5 stones of board[x][y]'s colour passes through (x, y)."""
        color = board[x][y]
        for dx, dy in Game.DIRECTIONS:
            run = (1
                   + Game.count_in_direction(board, x, y, dx, dy, color, board_size)
                   + Game.count_in_direction(board, x, y, -dx, -dy, color, board_size))
            if run >= 5:
                return True
        return False

    @staticmethod
    def count_in_direction(board, x, y, dx, dy, color, board_size):
        """Count consecutive same-colour stones strictly beyond (x, y) along (dx, dy)."""
        steps = 0
        cx, cy = x + dx, y + dy
        while 0 <= cx < board_size and 0 <= cy < board_size and board[cx][cy] == color:
            steps += 1
            cx += dx
            cy += dy
        return steps
    