import numpy as np
from enum import Enum
from gobang_agent import *
from sqlalchemy import true
from gobang_agent import *


def run_episode_from_status(env, agent, color, status, render=False):
    """Play one complete episode starting from a given board status.

    Args:
        env: the Gobang environment (provides reset_with_status/step/render).
        agent: the Q-learning agent (provides sample/backPropagateValue).
        color: piece color that moves first (WHITE_PIECES or BLACK_PIECES).
        status: board status to restore before playing.
        render: when True, draw the board after every move.

    Returns:
        (total_steps, winner): moves played and the winning piece color.
    """
    step_count = 0  # number of moves made in this episode
    print(status)
    obs = env.reset_with_status(status)  # restart the env from the given status
    print(obs)
    trajectory = []  # (observation, action) pairs, replayed for value backprop

    while True:
        move = agent.sample(obs, color)  # let the policy pick a move
        trajectory.append((obs, move))
        next_obs, reward, done, _ = env.step(move)  # apply it to the board

        # hand the turn to the other color
        color = WHITE_PIECES if color != WHITE_PIECES else BLACK_PIECES
        obs = next_obs
        # a terminal reward of +/-100 means one side won: propagate the value
        # backwards through the whole trajectory to update the Q-table
        if reward in (100, -100):
            agent.backPropagateValue(trajectory, reward)
        step_count += 1
        if render:
            env.render()
        if done:
            print("reward:%d" % (reward))
            env.render()
            break

    if reward == 100:
        winner = WHITE_PIECES
    else:
        winner = BLACK_PIECES
    return step_count, winner


class GobangEngine(object):
    """Interactive Gobang engine pairing a human player with a Q-learning AI.

    Keeps the move history of the current game in ``self.backtrack`` so that
    on a win the whole trajectory can be back-propagated into the Q-table.
    """

    def __init__(self, row, col, group):
        """Build the environment and restore the previously trained agent.

        Args:
            row: board row count.
            col: board column count.
            group: number of aligned pieces required to win.
        """
        self.env = GobangEnv(row, col, group)
        # Create an agent instance bound to this environment.
        self.agent = QLearningAgent(env=self.env)
        self.backtrack = []  # (obs, action) history of the current game
        self.agent.restore()  # load a previously saved Q-table, if any

    def _finish_move(self, obs, action, reward):
        """Record one move; on a win (reward +/-100) update and save the Q-table."""
        self.backtrack.append((obs, action))
        if reward == 100 or reward == -100:
            self.agent.backPropagateValue(self.backtrack, reward)
            self.agent.save()

    def man_act(self, action):
        """Apply a human player's move.

        Args:
            action: the move to play (as accepted by ``env.step``).

        Returns:
            done: True when this move ended the game.
        """
        obs = self.env.status
        next_obs, reward, done, _ = self.env.step(action)
        self._finish_move(obs, action, reward)
        return done

    def reset(self):
        """Start a new game: clear the move history and reset the board."""
        self.backtrack = []
        self.env.reset()

    def ai_act(self, color):
        """Train briefly from the current position, then play the greedy move.

        Args:
            color: the piece color the AI plays.

        Returns:
            (action, done): the move taken and whether the game ended.
        """
        obs = self.env.status
        print(obs)
        print(self.env.chessboard)
        print("===========基于当前状态训练n个episode")
        # Run a few self-play episodes from the current board state so the
        # Q-table has fresh value estimates around this position.
        for _ in range(5):
            run_episode_from_status(self.env, self.agent, color, obs, False)

        # Training mutated the environment; restore the real game position.
        self.env.reset_with_status(obs)
        # Then take the best known (greedy) action for the current state.
        action = self.agent.predict(obs, color)
        next_obs, reward, done, _ = self.env.step(action)
        self._finish_move(obs, action, reward)
        return action, done

    def isValidAction(self, action):
        # BUG FIX: the original body was `return self.isValidAction(action)`,
        # which recurses unconditionally and raises RecursionError on every
        # call. Delegate to the environment's validity check instead.
        # NOTE(review): confirm GobangEnv exposes isValidAction.
        return self.env.isValidAction(action)

if __name__ == '__main__':
    # Scripted demo game: the human plays color 1, the AI plays color 2.
    # Each action is [row, col, color]; all coordinates below are within 0..4.
    actionlist = [[3, 2, 1], [0, 4, 2], [3, 3, 1],
                  [3, 2, 2], [4, 4, 1], [0, 0, 2], [0, 3, 1]]

    # BUG FIX: GobangEngine.__init__ requires (row, col, group); the original
    # call passed no arguments and raised TypeError before any move was made.
    # The moves above fit a 5x5 board, so use 5x5 with a 5-in-a-row win rule.
    # TODO(review): confirm the intended board size and win length.
    gobang = GobangEngine(5, 5, 5)
    gobang.man_act([3, 2, 1])
    gobang.ai_act(2)
    gobang.man_act([1, 4, 1])
    gobang.ai_act(2)
    gobang.man_act([4, 3, 1])
    gobang.ai_act(2)
    gobang.man_act([0, 0, 1])
    gobang.ai_act(2)
    gobang.env.render()
