import numpy as np
from enum import Enum
from gobang_agent import *
from sqlalchemy import true


def run_episode(env, agent, color, render=False):
    """Play one self-play training episode and back-propagate terminal reward.

    Args:
        env: Gobang environment exposing reset()/step()/render().
        agent: Q-learning agent providing sample() and backPropagateValue().
        color: piece color (WHITE_PIECES or BLACK_PIECES) that moves first.
        render: when True, draw every frame; the final frame is always drawn.

    Returns:
        (total_steps, winner): moves played and the color deemed the winner
        (WHITE_PIECES when the final reward is 100, else BLACK_PIECES).
    """
    step_count = 0              # number of moves made this episode
    obs = env.reset()           # start a fresh game
    history = []                # (observation, action) pairs for back-propagation

    while True:
        action = agent.sample(obs, color)   # exploratory policy pick
        history.append((obs, action))
        next_obs, reward, done, _ = env.step(action)

        # Alternate turns between the two players.
        color = BLACK_PIECES if color == WHITE_PIECES else WHITE_PIECES
        obs = next_obs
        # A terminal reward (+/-100) is pushed back along the whole game.
        if reward in (100, -100):
            agent.backPropagateValue(history, reward)
        step_count += 1
        if render:
            env.render()
        if done:
            env.render()        # always show the finished board
            break

    winner = WHITE_PIECES if reward == 100 else BLACK_PIECES
    return step_count, winner


def test_episode(env, agent):
    """Run one greedy evaluation game (no exploration, no learning).

    Args:
        env: Gobang environment exposing reset()/step().
        agent: agent providing predict() (greedy action selection).

    Returns:
        (reward, total_steps, winner): final step reward, moves played,
        and the color deemed the winner (WHITE_PIECES iff reward == 100).
    """
    reward = 0
    step_count = 0
    color = WHITE_PIECES        # white moves first
    obs = env.reset()
    done = False
    while not done:
        action = agent.predict(obs, color)   # greedy policy pick
        next_obs, reward, done, _ = env.step(action)
        step_count += 1
        if not done:
            # Advance to the next turn only while the game is live.
            obs = next_obs
            color = BLACK_PIECES if color == WHITE_PIECES else WHITE_PIECES

    winner = WHITE_PIECES if reward == 100 else BLACK_PIECES
    return reward, step_count, winner


if __name__ == '__main__':
    env = GobangEnv(GOBANG_ROW, GOBANG_COL, GOBANG_GROUP)

    # Create an agent instance with its hyper-parameters.
    agent = QLearningAgent(
        env=env,
        gamma=0.5,
        e_greed=0.1)

    # Train for 10000 self-play episodes, printing each episode's
    # step count and winner.
    for episode in range(10000):
        ep_steps, color = run_episode(env, agent, WHITE_PIECES, False)
        print('Episode %s: steps = %s , winner:%s' %
              (episode, ep_steps, "white" if (color == WHITE_PIECES) else "black"))

    agent.save()
    # agent.restore()

    # Training finished: evaluate the learned policy with one greedy game.
    test_reward, total_step, color = test_episode(env, agent)
    print('test reward = %.1f,step = %d, winner:%s' %
          (test_reward, total_step, "white" if (color == WHITE_PIECES) else "black"))
