import random
import math
import time
import pygame
import numpy as np
import pickle  # 保存/读取字典

class Agent:
    """Q-learning agent that picks moves on a 3x3 board with an epsilon-greedy policy."""

    def __init__(self, q_table=None):
        # Backward-compatible: existing callers (Decide) pass nothing and get the
        # module-level table loaded under __main__; tests may inject a dict.
        self.Q_table = q_table if q_table is not None else Q_table_pkl
        self.EPSILON = 0  # exploration probability (0 = always exploit)

    def getEmptyPos(self, state_):
        """Return the (row, col) coordinates of every empty (== 0) cell, row-major."""
        return [(i, j)
                for i, row in enumerate(state_)
                for j, one in enumerate(row)
                if one == 0]

    def randomAction(self, state_, mark):
        """Pick a uniformly random empty cell and wrap it as an action dict."""
        action_pos = random.choice(self.getEmptyPos(state_))
        return {'mark': mark, 'pos': action_pos}

    def overTurn(self, state_):
        """Return a copy of the state with every occupied cell's sign flipped.

        Zeros are deliberately left untouched: negating them would yield -0.0,
        which changes str(state) and therefore the Q-table lookup key.
        """
        state_tf = state_.copy()
        for i, row in enumerate(state_tf):
            for j, one in enumerate(row):
                if one != 0:
                    state_tf[i][j] *= -1
        return state_tf

    def epsilon_greedy(self, state_, currentMove):
        """Choose an action via the epsilon-greedy policy.

        The Q-table is keyed on str(state) from blue's perspective, so red's
        states are sign-flipped before lookup. With probability (1 - EPSILON)
        one of the best-valued actions is chosen; otherwise a non-best action
        is explored (when any exists).
        """
        # Normalize to blue's perspective for the table lookup.
        state = state_.copy() if currentMove == 'blue' else self.overTurn(state_)
        Q_Sa = self.Q_table[str(state)]
        # max() over the actual values replaces the old -100 sentinel, which
        # failed whenever every Q-value was below -100.
        maxValue = max(Q_Sa.values())
        maxAction = [str2tuple(k) for k, v in Q_Sa.items() if v == maxValue]
        otherAction = [str2tuple(k) for k, v in Q_Sa.items() if v != maxValue]
        # Explicit emptiness check replaces the old bare `except:` that silently
        # fell back to maxAction when otherAction was empty.
        if otherAction and random.random() <= self.EPSILON:
            action_pos = random.choice(otherAction)
        else:
            action_pos = random.choice(maxAction)
        return {'mark': currentMove, 'pos': action_pos}

class Board:
    """3x3 tic-tac-toe board: blue plays 1, red plays -1, 0 marks an empty cell."""

    def __init__(self):
        self.winner = None  # 'blue', 'red', or None while ongoing / on a draw
        self.state = np.zeros([3, 3])

    def reset(self):
        """Clear the board and the recorded winner for a fresh game."""
        self.state = np.zeros([3, 3])
        # Fix: previously the old winner leaked into the next game.
        self.winner = None

    def get_side(self):
        """Return whose turn it is: 'blue' when both sides have moved equally, else 'red'."""
        # blue (1) and red (-1) alternate, so the sum is 0 exactly on blue's turn.
        return 'blue' if self.state.sum() == 0 else 'red'

    def judgeEnd(self, new_state):
        """Record new_state and report whether the game is over.

        Sets self.winner to 'blue'/'red' on a three-in-a-row. Returns True for
        a win or a full board (draw, winner left unchanged), False otherwise.
        """
        self.state = new_state
        # Both diagonals.
        check_diag_1 = self.state[0][0] + self.state[1][1] + self.state[2][2]
        check_diag_2 = self.state[2][0] + self.state[1][1] + self.state[0][2]
        if check_diag_1 == 3 or check_diag_2 == 3:
            self.winner = 'blue'
            return True
        elif check_diag_1 == -3 or check_diag_2 == -3:
            self.winner = 'red'
            return True
        # All three rows and, via the transpose, all three columns.
        state_T = self.state.T
        for i in range(3):
            check_row = sum(self.state[i])
            check_col = sum(state_T[i])
            if check_row == 3 or check_col == 3:
                self.winner = 'blue'
                return True
            elif check_row == -3 or check_col == -3:
                self.winner = 'red'
                return True
        # No winner: the game only ends if the board is full (draw).
        if not (self.state == 0).any():
            return True
        return False


def str2tuple(string):
    """Parse a position key like '(1, 1)' back into an (int, int) tuple.

    The Q-table stores board positions as str(tuple) keys; this reverses that
    mapping. Unlike the old fixed-character-index parse, this tolerates any
    whitespace around the numbers (so both '(1, 1)' and '(1,1)' work) and
    supports multi-digit coordinates.
    """
    row_s, col_s = string.strip('()').split(',')
    return int(row_s), int(col_s)  # int() ignores surrounding whitespace


class Decide:
    """Glue object: judges the board and asks the agent for the next move."""

    def __init__(self):
        self.board = Board()
        self.agent = Agent()

    def play(self, board_state):
        """Given a 3x3 board state, print status and return the chosen cell.

        Returns None when the game is already over, otherwise the flat 0-8
        index (row-major) of the cell the agent wants to play.
        """
        truncated = self.board.judgeEnd(board_state)
        if truncated:
            print('Game Over!')
            print('The winner is: ', self.board.winner)
            return None

        currentMove = self.board.get_side()
        print(currentMove)

        action = self.agent.epsilon_greedy(board_state, currentMove)
        print(action)
        # Flatten (row, col) to a 0-8 cell index; replaces an immediately
        # invoked lambda that obscured this one-line computation.
        row, col = action['pos']
        output = row * 3 + col
        print(output)
        return output


if __name__ == '__main__':
    # Load the trained Q-table (dict keyed on str(state)) from disk.
    # NOTE(review): pickle.load is only safe on trusted files — confirm this
    # file is always produced locally by the training script.
    with open('Q_table_dict.pkl', 'rb') as f:
        Q_table_pkl = pickle.load(f)

    dec = Decide()

    # Demo position: blue threatens the top row, red holds the anti-diagonal.
    board_state = np.array(
        [[1, 1, 0],
         [0, -1, 0],
         [-1, 0, 0]],
        dtype=float,
    )
    print(board_state)
    dec.play(board_state)
