import random
import numpy as np
import math
from collections import OrderedDict
import cv2

class Agent:
    """Q-table driven player for 3x3 tic-tac-toe.

    The Q-table maps ``str(state)`` (the string form of a 3x3 board,
    blue = +1, red = -1, empty = 0) to a dict of ``"(i, j)" -> Q-value``
    entries.
    """

    def __init__(self, Q_table_pkl):
        # Exploration probability; 0 means pure exploitation.
        self.EPSILON = 0
        self.Q_table = Q_table_pkl

    def getEmptyPos(self, state_):
        """Return the (row, col) coordinates of every empty cell."""
        return [(i, j)
                for i, row in enumerate(state_)
                for j, one in enumerate(row)
                if one == 0]

    def overTurn(self, state_):
        """Return a copy of the state with every piece's sign flipped.

        Lets the red player look up the shared Q-table as if it were blue.
        NOTE(review): relies on ``state_.copy()`` being a deep copy — true
        for numpy arrays, NOT for plain nested lists; callers pass numpy.
        """
        state_tf = state_.copy()
        for i, row in enumerate(state_tf):
            for j, one in enumerate(row):
                if one != 0:
                    state_tf[i][j] *= -1
        return state_tf

    def epsilon_greedy(self, state_, currentMove):
        """Pick an action with an epsilon-greedy policy over the Q-table.

        Falls back to a uniformly random empty cell when the state is
        unknown to the table.  Returns ``{'mark': currentMove, 'pos': (i, j)}``.
        """
        # Red plays on the sign-flipped board so one table serves both sides.
        state = state_.copy() if currentMove == 'blue' else self.overTurn(state_)
        Q_Sa = self.Q_table.get(str(state), {})
        # Split known actions into best-valued ones and the rest.
        maxValue = max(Q_Sa.values(), default=-float('inf'))
        maxAction, otherAction = [], []
        for key, value in Q_Sa.items():
            (maxAction if value == maxValue else otherAction).append(self._parse_pos(key))
        try:
            if maxAction and otherAction:
                # Exploit with probability 1 - EPSILON, otherwise explore.
                action_pos = (random.choice(maxAction)
                              if random.random() > self.EPSILON
                              else random.choice(otherAction))
            else:
                action_pos = (random.choice(maxAction)
                              if maxAction
                              else random.choice(self.getEmptyPos(state_)))
        except IndexError:
            # random.choice raised on an empty sequence (was a bare except):
            # no usable Q entries, so pick any free cell.
            action_pos = random.choice(self.getEmptyPos(state_))
        return {'mark': currentMove, 'pos': action_pos}

    @staticmethod
    def _parse_pos(key):
        """Convert a Q-table key such as ``"(0, 1)"`` into the tuple (0, 1)."""
        i, j = key.strip('()').split(',')
        return (int(i), int(j))

def str2tuple(string):
    """Parse a coordinate string such as '(1,1)' into the int tuple (1, 1)."""
    row, col = string.strip('()').split(',')
    return int(row), int(col)

class Board:
    """3x3 tic-tac-toe board: blue = +1, red = -1, empty = 0."""

    def __init__(self):
        self.winner = None              # 'blue', 'red', or None (ongoing/draw)
        self.state = np.zeros([3, 3])

    def reset(self):
        """Clear the board AND the recorded winner for a fresh game."""
        self.state = np.zeros([3, 3])
        # Bug fix: winner previously leaked across games after reset().
        self.winner = None

    def get_side(self):
        """Whose turn it is: blue (+1) moves first, so a zero sum means blue."""
        return 'blue' if self.state.sum() == 0 else 'red'

    def judgeEnd(self, new_state):
        """Adopt ``new_state`` and report whether the game is over.

        Sets ``self.winner`` on a three-in-a-row.  Returns True on a win or
        a full board (a draw leaves ``winner`` untouched), False otherwise.
        """
        self.state = new_state
        # Both diagonals first.
        diag1 = self.state[0][0] + self.state[1][1] + self.state[2][2]
        diag2 = self.state[2][0] + self.state[1][1] + self.state[0][2]
        if diag1 == 3 or diag2 == 3:
            self.winner = 'blue'
            return True
        if diag1 == -3 or diag2 == -3:
            self.winner = 'red'
            return True
        # All three rows and columns.
        state_T = self.state.T
        for i in range(3):
            row_sum, col_sum = sum(self.state[i]), sum(state_T[i])
            if row_sum == 3 or col_sum == 3:
                self.winner = 'blue'
                return True
            if row_sum == -3 or col_sum == -3:
                self.winner = 'red'
                return True
        # No winner: the game is over only when no empty cell remains (draw).
        return not any(0 in row for row in self.state)

class MCTSNode:
    """One node of a Monte-Carlo search tree over 3x3 board states.

    NOTE(review): ``expand``, ``perform_action`` and ``simulate`` all place
    +1 ("assume current player is blue") and never alternate sides — this
    mirrors the original design; confirm whether alternation is intended.
    """

    def __init__(self, state, parent=None, action=None):
        self.state = state          # 3x3 numpy board snapshot
        self.parent = parent        # None for the root
        self.action = action        # (row, col) move that produced this state
        self.children = []
        self.visits = 0             # backpropagated visit count
        self.wins = 0               # backpropagated cumulative result

    def select(self):
        """Return a child maximising the UCT score (ties broken randomly).

        Unvisited children score +inf so they are always tried first.
        """
        best_score = -float('inf')
        best_nodes = []
        for child in self.children:
            if child.visits == 0:
                score = float('inf')
            else:
                exploitation = child.wins / child.visits
                exploration = math.sqrt(2 * math.log(self.visits) / child.visits)
                score = exploitation + exploration
            if score > best_score:
                best_score = score
                best_nodes = [child]
            elif score == best_score:
                best_nodes.append(child)
        return random.choice(best_nodes)

    def expand(self, available_actions):
        """Create one child per available action by placing a +1 piece."""
        for action in available_actions:
            new_state = self.state.copy()
            new_state[action[0]][action[1]] = 1  # assumes blue to move — TODO confirm
            child = MCTSNode(new_state, parent=self, action=action)
            self.children.append(child)

    def simulate(self):
        """Play random +1 moves from this state until terminal; return result."""
        current_state = self.state.copy()
        while not self.is_terminal(current_state):
            available_actions = self.get_available_actions(current_state)
            action = self.get_random_action(available_actions)
            current_state = self.perform_action(action, current_state)
        return self.get_result(current_state)

    @staticmethod
    def perform_action(action, state):
        """Return a copy of ``state`` with a +1 piece placed at ``action``."""
        new_state = state.copy()
        new_state[action[0]][action[1]] = 1  # assumes blue to move — TODO confirm
        return new_state

    @staticmethod
    def is_terminal(state):
        """True when ``state`` is a win for either side or a full board."""
        board = Board()
        board.state = state
        return board.judgeEnd(state)

    @staticmethod
    def get_available_actions(state):
        """List the (row, col) coordinates of every empty cell."""
        actions = []
        for i in range(3):
            for j in range(3):
                if state[i][j] == 0:
                    actions.append((i, j))
        return actions

    @staticmethod
    def get_random_action(available_actions):
        """Pick a uniformly random action from the list."""
        return random.choice(available_actions)

    @staticmethod
    def get_result(state):
        """Score a terminal state: +1 blue win, -1 red win, 0 otherwise."""
        board = Board()
        # Bug fix: judgeEnd must run to populate board.winner; previously
        # winner stayed None and every simulation scored 0.
        board.judgeEnd(state)
        if board.winner == 'blue':
            return 1
        elif board.winner == 'red':
            return -1
        else:
            return 0

    def backpropagate(self, result):
        """Add ``result`` and one visit to this node and every ancestor."""
        self.visits += 1
        self.wins += result
        if self.parent is not None:
            self.parent.backpropagate(result)

class MCTS:
    """Monte-Carlo tree search driver built on MCTSNode."""

    def __init__(self, root_state, simulations=1000):
        self.root = MCTSNode(root_state)
        self.simulations = simulations

    def search(self):
        """Run the configured number of simulations and return the action
        of the most-visited root child."""
        for _ in range(self.simulations):
            leaf = self._select_node(self.root)
            if MCTSNode.is_terminal(leaf.state):
                # Terminal leaf: score it directly and propagate upward.
                leaf.backpropagate(MCTSNode.get_result(leaf.state))
                continue
            leaf.expand(MCTSNode.get_available_actions(leaf.state))
            chosen = leaf.select()
            chosen.backpropagate(chosen.simulate())
        most_visited = max(self.root.children, key=lambda child: child.visits)
        return most_visited.action

    def _select_node(self, node):
        """Descend via UCT selection until a childless or terminal node."""
        current = node
        while current.children and not MCTSNode.is_terminal(current.state):
            current = current.select()
        return current

class Decide:
    """Combine a Q-table agent and MCTS to choose a move on a 3x3 board."""

    def __init__(self, Q_table_pkl):
        self.board = Board()
        self.agent = Agent(Q_table_pkl)
        self.mcts = None

    def play(self, board_state):
        """Return the chosen move encoded as ``row * 3 + col``, or None if
        the game in ``board_state`` is already over."""
        truncated = self.board.judgeEnd(board_state)
        if truncated:
            print('Game Over!')
            print(f'The winner is: {self.board.winner}')
            return None

        currentMove = self.board.get_side()

        # Log current player and board for debugging.
        print(f"Current player: {currentMove}")
        print("Current board state:")
        print(board_state)

        # Empty board: open in the centre by default.
        if np.all(board_state == 0):
            action_pos = (1, 1)
            output = action_pos[0] * 3 + action_pos[1]
            print(f"Action chosen: {action_pos}")
            return output

        # Candidate move from the Q-table policy.
        q_action = self.agent.epsilon_greedy(board_state, currentMove)
        print(f"Q-Learning suggested action: {q_action['pos']}")

        # Candidate move from Monte-Carlo tree search.
        self.mcts = MCTS(board_state)
        mcts_action = self.mcts.search()
        print(f"MCTS suggested action: {mcts_action}")

        # Legal moves are exactly the empty cells.
        valid_actions = self.agent.getEmptyPos(board_state)
        q_action_pos = q_action['pos']
        mcts_action_pos = mcts_action

        print(f"Valid actions: {valid_actions}")

        # Prefer the MCTS candidate when legal (the original picked it even
        # when the two methods disagreed), then the Q-table candidate, and
        # finally a random empty cell.  Bug fix: the old fallback could
        # return an occupied cell when the MCTS candidate was illegal.
        if mcts_action_pos in valid_actions:
            selected_action = mcts_action_pos
        elif q_action_pos in valid_actions:
            selected_action = q_action_pos
        else:
            selected_action = random.choice(valid_actions)

        print(f"Selected action: {selected_action}")
        output = selected_action[0] * 3 + selected_action[1]
        print(f"Action encoded as: {output}")
        return output
