import math
import argparse
import numpy as np
import curses
from copy import deepcopy
from dgo3 import GoBoard  # 从 dgo3.py 中导入 GoBoard 类
import time
import pickle
import pygame  # 用于绘制奖励曲线
import turtle  # 用于绘制奖励曲线

# 继承 GoBoard 类，并添加 reset 方法
class CustomGoBoard(GoBoard):
    """GoBoard subclass with RL-environment helpers.

    Adds: a size-aware reset, a simple occupancy-based move placement,
    draw detection, and three/four-in-a-row pattern checks used for
    reward shaping.
    """

    def __init__(self, stdscr, size):
        # GoBoard.__init__ requires a file_name for its move log;
        # pass the project's default output file.
        super().__init__(stdscr, file_name="game_output4.txt")
        self.size = size  # board dimension (size x size)

    def make_move(self, move, player):
        """Place player's stone at move=(row, col) if the cell is empty.

        Returns True when the stone was placed, False when occupied.
        """
        row, col = move
        if self.board[row][col] == 0:
            self.board[row][col] = player
            return True
        return False

    def reset(self):
        """Clear the board, reset to black (1) to move, and redraw."""
        # BUGFIX: was hard-coded to 19x19 regardless of self.size,
        # breaking any board created with a different --size.
        self.board = [[0] * self.size for _ in range(self.size)]
        self.current_player = 1
        self.print_board()

    def check_draw(self):
        """Return True when no empty cell remains on the board."""
        return all(cell != 0 for row in self.board for cell in row)

    def _check_n_in_a_row(self, player, n):
        """Return True if player has n consecutive stones in any row,
        column, or diagonal. Shared engine for the 3/4-in-a-row checks."""
        size = self.size
        board = self.board
        # Rows.
        for row in range(size):
            for col in range(size - n + 1):
                if all(board[row][col + i] == player for i in range(n)):
                    return True
        # Columns.
        for col in range(size):
            for row in range(size - n + 1):
                if all(board[row + i][col] == player for i in range(n)):
                    return True
        # Diagonals (top-left to bottom-right).
        for row in range(size - n + 1):
            for col in range(size - n + 1):
                if all(board[row + i][col + i] == player for i in range(n)):
                    return True
        # Anti-diagonals (top-right to bottom-left).
        for row in range(size - n + 1):
            for col in range(n - 1, size):
                if all(board[row + i][col - i] == player for i in range(n)):
                    return True
        return False

    def check_three_in_a_row(self, player):
        """Return True if player has three in a row anywhere."""
        return self._check_n_in_a_row(player, 3)

    def check_four_in_a_row(self, player):
        """Return True if player has four in a row anywhere."""
        return self._check_n_in_a_row(player, 4)

# 定义环境类
# Environment class wrapping the curses board for Q-learning
class Environment:
    """Curses-backed Gobang environment with shaped rewards.

    State is a size x size grid mirroring the board: 0 empty,
    1 black, -1 white. `step` returns (state, reward, done).
    """

    def __init__(self, size=19):
        self.size = size
        self.stdscr = curses.initscr()
        curses.start_color()
        curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_WHITE)
        curses.init_pair(2, curses.COLOR_WHITE, curses.COLOR_BLACK)
        self.board = CustomGoBoard(self.stdscr, size=size)  # inherited board with RL helpers
        self.state = [[0] * size for _ in range(size)]
        self.current_player = 1  # 1 = black, -1 = white
        self.episode_rewards = []

    def reset(self):
        """Reset board and mirrored state; black moves first."""
        self.board.reset()  # CustomGoBoard.reset clears and redraws
        self.state = [[0] * self.size for _ in range(self.size)]
        self.current_player = 1
        return self.state

    def step(self, action):
        """Apply action=(row, col) for the current player.

        Returns (state, reward, done). Illegal moves are penalized
        and leave both the board and the mirrored state untouched.
        """
        row, col = action
        if not self.board.make_move(action, self.current_player):
            # BUGFIX: previously the mirrored state was written even for
            # illegal moves, desynchronizing it from the real board.
            return self.state, -1, False  # illegal-move penalty

        self.board.print_board(False)
        # time.sleep(0.1)

        # BUGFIX: record the mover's stone BEFORE any player switch.
        # Previously current_player was flipped first on non-terminal
        # moves, so the opponent's mark was written into the state.
        self.state[row][col] = self.current_player

        reward = 0.1  # base reward for a legal move
        done = False
        if self.board.check_win(self.current_player):
            reward = 10  # winning reward
            done = True
        elif self.board.check_draw():
            reward = 0.5  # draw reward
            done = True
        else:
            # Shaping: reward own 3/4-in-a-row threats...
            if self.board.check_three_in_a_row(self.current_player):
                reward += 0.5
            if self.board.check_four_in_a_row(self.current_player):
                reward += 1
            # ...and penalize letting the opponent keep theirs.
            opponent = -self.current_player
            if self.board.check_three_in_a_row(opponent):
                reward -= 0.5
            if self.board.check_four_in_a_row(opponent):
                reward -= 1
            self.current_player = -self.current_player
        return self.state, reward, done

    def render(self):
        """Redraw the board without highlighting."""
        self.board.print_board(False)

    def close(self):
        """Restore the terminal from curses mode."""
        curses.endwin()

    def check_space(self):
        """Show the board and block until the user presses space."""
        self.board.print_board(True)
        key = self.stdscr.getch()
        while key != ord(' '):  # wait for the space key
            key = self.stdscr.getch()
        self.board.print_board(False)

class QLearningAgent:
    """Tabular epsilon-greedy Q-learning agent over board states.

    The Q-table maps a hashable state key (tuple of row tuples) to a
    size x size numpy array of action values.
    """

    def __init__(self, size, epsilon=0.1, alpha=0.1, gamma=0.9):
        self.size = size
        self.epsilon = epsilon  # exploration rate
        self.alpha = alpha      # learning rate
        self.gamma = gamma      # discount factor
        self.q_table = {}

    def get_state_key(self, state):
        """Convert the (list-of-lists) state into a hashable dict key."""
        return tuple(map(tuple, state))

    def get_valid_actions(self, state):
        """Return all empty cells as (row, col) tuples."""
        return [(r, c) for r in range(self.size) for c in range(self.size) if state[r][c] == 0]

    def _q_values_for(self, state_key):
        """Return the Q-value grid for state_key, creating it on first use."""
        if state_key not in self.q_table:
            self.q_table[state_key] = np.zeros((self.size, self.size))
        return self.q_table[state_key]

    def choose_action(self, state):
        """Epsilon-greedy action selection over the legal moves.

        Returns None when the board is full (no legal actions).
        """
        valid_actions = self.get_valid_actions(state)
        if not valid_actions:
            return None

        if np.random.rand() < self.epsilon:
            # Explore: uniform random legal move.
            return valid_actions[np.random.randint(len(valid_actions))]

        # Exploit: legal move with the highest Q-value (first on ties).
        q_values = self._q_values_for(self.get_state_key(state))
        q_values_valid = [q_values[r, c] for r, c in valid_actions]
        return valid_actions[np.argmax(q_values_valid)]

    def update_q_table(self, state, action, reward, next_state):
        """One-step Q-learning update:
        Q(s,a) += alpha * (r + gamma * max_a' Q(s',a') - Q(s,a))."""
        # BUGFIX: the entry for `state` is now created on demand too.
        # Previously only choose_action's exploit branch created it, so
        # an update after an exploratory move raised KeyError.
        q_state = self._q_values_for(self.get_state_key(state))
        q_next = self._q_values_for(self.get_state_key(next_state))

        max_q_next = np.max(q_next)
        q_value = q_state[action[0], action[1]]
        q_state[action[0], action[1]] = q_value + self.alpha * (reward + self.gamma * max_q_next - q_value)

def train_agent(agent, env, episodes=1000):
    """Run Q-learning for `episodes` games on `env`.

    Logs per-step rewards every 10th episode, checkpoints the Q-table
    to a pickle every 100 episodes, then closes the environment and
    plots the per-episode reward curve.
    """
    rewards_per_episode = []
    for episode_no in range(1, episodes + 1):
        verbose = episode_no % 10 == 0
        state = env.reset()
        accumulated = 0
        while True:
            action = agent.choose_action(state)
            if action is None:
                break  # board is full: no legal move left
            next_state, reward, finished = env.step(action)
            agent.update_q_table(state, action, reward, next_state)
            state = next_state
            accumulated += reward
            if verbose:
                print(f"Episode {episode_no}/{episodes}, Action: {action}, Reward: {reward}")
            if finished:
                break
        rewards_per_episode.append(accumulated)
        if episode_no % 100 == 0:
            with open(f'q_table_episode_{episode_no}.pkl', 'wb') as f:
                pickle.dump(agent.q_table, f)
            print(f"Q表已保存至 q_table_episode_{episode_no}.pkl")
        # DEBUG (hidden):
        # env.check_space()  # inspect training state interactively
    env.close()
    draw_rewards(rewards_per_episode)  # plot the reward curve

def draw_rewards(rewards):
    """Plot the per-episode reward curve in a pygame window.

    Blocks until the user closes the window. Safe for short inputs:
    an empty list or a single data point no longer crashes.
    """
    pygame.init()
    screen = pygame.display.set_mode((800, 600))
    pygame.display.set_caption("Reward Curve")
    screen.fill((255, 255, 255))  # white background

    # Dynamic y-axis scaling; guard against empty input and zero range.
    max_reward = max(rewards) if rewards else 1
    min_reward = min(rewards) if rewards else 0
    reward_range = max_reward - min_reward if max_reward != min_reward else 1

    font = pygame.font.Font(None, 24)  # hoisted: one font, not one per label

    # Axes.
    pygame.draw.line(screen, (0, 0, 0), (50, 550), (750, 550), 2)  # x axis
    pygame.draw.line(screen, (0, 0, 0), (50, 50), (50, 550), 2)    # y axis

    if rewards:
        # x-axis ticks roughly every 10% of the episodes.
        # BUGFIX: step was len(rewards) // 10, which is 0 for fewer
        # than 10 episodes and made range() raise ValueError.
        step = max(1, len(rewards) // 10)
        for i in range(0, len(rewards), step):
            x = 50 + i * (700 / len(rewards))
            pygame.draw.line(screen, (0, 0, 0), (x, 550), (x, 545), 2)  # tick mark
            screen.blit(font.render(f"{i}", True, (0, 0, 0)), (x - 10, 560))

    # y-axis ticks: 11 labels spanning min_reward..max_reward.
    for i in range(0, 11):
        y = 550 - i * 50
        pygame.draw.line(screen, (0, 0, 0), (50, y), (55, y), 2)  # tick mark
        reward_value = min_reward + (i * reward_range / 10)
        screen.blit(font.render(f"{reward_value:.2f}", True, (0, 0, 0)), (10, y - 10))

    # Curve points, normalized into the 500px plot height.
    points = []
    for i, reward in enumerate(rewards):
        x = 50 + i * (700 / len(rewards))
        y = 550 - int(((reward - min_reward) / reward_range) * 500)
        points.append((int(x), int(y)))

    # BUGFIX: pygame.draw.lines raises for fewer than two points.
    if len(points) >= 2:
        pygame.draw.lines(screen, (0, 0, 255), False, points, 2)

    pygame.display.flip()

    # Event loop so the user can close the window manually; the short
    # wait keeps the loop from spinning at 100% CPU.
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
        pygame.time.wait(50)

    pygame.quit()


if __name__ == "__main__":
    # Parse the board size (5-19, default 19) and launch training.
    arg_parser = argparse.ArgumentParser(description="Gobang Game")
    arg_parser.add_argument(
        "--size",
        type=int,
        default=19,
        choices=range(5, 20),
        help="Size of the game board (5-19)",
    )
    options = arg_parser.parse_args()

    learner = QLearningAgent(size=options.size)
    environment = Environment(size=options.size)
    train_agent(learner, environment, episodes=1000)