import json
import pickle
import unittest
from enum import Enum

import numpy as np
from sqlalchemy import true  # NOTE(review): accidental import; the builtin True is meant — kept only for compatibility

from gobang_env import *

PRECISE = 0.00000001


def removeAllItem(valuelist, delValue):
    """Return a new list with every occurrence of ``delValue`` removed.

    The input list is left untouched; comparison uses ``==`` so numerically
    equal values (e.g. ``1`` and ``1.0``) are treated as the same item.
    """
    return [val for val in valuelist if val != delValue]


class QLearningAgent(object):
    """Tabular Q-learning agent for the Gobang environment.

    The Q table maps an observation (board state key) to a ``(1, act_n)``
    int8 array holding one value per flattened board position.
    """

    def __init__(self, env, gamma=0.9, e_greed=0.1):
        self.env = env
        self.act_n = env.row * env.col  # action dimension: one action per board cell
        self.gamma = gamma              # reward discount factor
        self.epsilon = e_greed          # probability of a random exploratory move
        self.Q = {}                     # obs -> np.ndarray, shape (1, act_n), dtype int8

    def sample(self, obs, color):
        """Pick an action for ``obs`` with epsilon-greedy exploration."""
        if np.random.uniform(0, 1) < (1.0 - self.epsilon):
            # Exploit: greedy action according to the Q table.
            return self.predict(obs, color)
        # Explore: keep drawing random board positions until one is legal.
        while True:
            flat_pos = np.random.choice(self.act_n)
            action = [flat_pos // self.env.col, flat_pos % self.env.col, color]
            if self.env.isValidAction(action):
                return action

    def predict(self, obs, color):
        """Return the greedy *legal* action for ``obs``.

        BLACK_PIECES picks the minimum stored value, any other color the
        maximum. Ties are broken at random; if every tied-best position is
        an illegal move, those positions are discarded and the next-best
        value is tried.

        Raises:
            Exception: if no legal candidate action exists at all.
        """
        if obs not in self.Q:
            self.Q[obs] = np.zeros((1, self.act_n), dtype=np.int8)
        q_values = self.Q[obs][0]

        # Track candidate *indices* rather than filtering the value list:
        # the original removed values with removeAllItem, which broke the
        # mapping from list position back to board position on later passes.
        candidates = np.arange(self.act_n)
        while candidates.size > 0:
            values = q_values[candidates]
            extreme = values.min() if color == BLACK_PIECES else values.max()
            tied = candidates[values == extreme]  # extreme may match several cells
            # Try the tied positions in random order until a legal one appears.
            for flat_pos in np.random.permutation(tied):
                action = [flat_pos // self.env.col, flat_pos % self.env.col, color]
                if self.env.isValidAction(action):
                    return action
            # Every extreme-valued position is illegal; drop them and retry.
            candidates = candidates[values != extreme]

        raise Exception("there are not candidate action")

    def save(self):
        """Persist the Q table to 'q_table.pkl'."""
        with open('q_table.pkl', 'wb') as pickle_file:
            pickle.dump(self.Q, pickle_file)

    def restore(self, npy_file='./q_table.npy'):
        """Load the Q table from 'q_table.pkl'.

        ``npy_file`` is accepted for backward compatibility but ignored;
        the data always comes from 'q_table.pkl'.
        """
        # NOTE(review): pickle.load can execute arbitrary code — only
        # restore files this agent produced itself.
        with open('q_table.pkl', 'rb') as pickle_file:
            self.Q = pickle.load(pickle_file)

    def backPropagateValue(self, backtrack, reward):
        """Propagate the terminal reward back along one episode.

        ``backtrack`` is a list of ``(obs, action)`` pairs in play order;
        it is reversed in place. Each visited cell gains the discounted
        reward; on every second step the position the *following* player
        took (their most valuable spot) is penalised, teaching the agent
        to occupy it first. All values are clamped to the int8 range.
        """
        nextval = reward
        backtrack.reverse()
        next_act = None
        for step, (obs, act) in enumerate(backtrack):
            if obs not in self.Q:
                self.Q[obs] = np.zeros((1, self.act_n), dtype=np.int8)
            flat_pos = act[0] * self.env.col + act[1]
            temp = self.Q[obs][0, flat_pos] + nextval
            self.Q[obs][0, flat_pos] = np.int8(np.clip(temp, -128, 127))

            if step % 2 == 1:  # contest the opponent's most valuable position
                most_value_pos = next_act[0] * self.env.col + next_act[1]
                temp = self.Q[obs][0, most_value_pos] - nextval
                self.Q[obs][0, most_value_pos] = np.int8(np.clip(temp, -128, 127))

            next_act = act
            nextval = self.gamma * nextval
            if nextval < 1:  # stop once the discounted reward is negligible
                break

        return

    def imitativeLearning(self, actionlist, render=False):
        """Replay one recorded game and learn from its outcome.

        ``actionlist`` must be the complete move list of one finished game;
        the episode has to end decisively (reward +100 or -100), otherwise
        an exception is raised.
        """
        backtrack = []
        reward = 0
        done = 0
        # Reset the environment, i.e. start a new episode.
        obs = self.env.reset()
        print("begin to episode, step:%d" % len(actionlist))
        if render:
            self.env.render()
        for action in actionlist:
            backtrack.append((obs, action))
            next_obs, reward, done, _ = self.env.step(action)  # one interaction
            if render:
                self.env.render()
            obs = next_obs  # carry the observation forward

        if not (done and (reward == 100 or reward == -100)):
            raise Exception("invalid episode,done:%d,reward:%d" %
                            (done, reward))

        print(reward)
        self.backPropagateValue(backtrack, reward)


# Read one recorded game (saved as JSON) and use it to train the agent.


def train_by_json():
    """Train an agent by imitating one recorded game stored as JSON."""
    env = GobangEnv(5, 5, 3)

    # Create an agent instance with its hyper-parameters.
    agent = QLearningAgent(
        env=env,
        gamma=0.5,
        e_greed=0.1)

    # One complete game record: a list of [row, col, color] moves.
    # (Was bound to the name `str`, shadowing the builtin.)
    record = "[[1, 1, 1], [1, 2, 2], [1, 3, 1], [2, 1, 2]]"
    actionlist = json.loads(record)
    # The record is a single episode, so replay it in one call.  The
    # original looped over the moves and fed each single move to
    # imitativeLearning as if it were a whole game, and passed
    # sqlalchemy's `true` where the builtin True was meant.
    agent.imitativeLearning(actionlist, True)
    return


class TestGoBangAgent(unittest.TestCase):

    def test_upper(self):
        self.assertEqual('foo'.upper(), 'FOO')

    def test_isupper(self):
        self.assertTrue('FOO'.isupper())
        self.assertFalse('Foo'.isupper())

    def test_split(self):
        s = 'hello world'
        self.assertEqual(s.split(), ['hello', 'world'])
        # check that s.split fails when the separator is not a string
        with self.assertRaises(TypeError):
            s.split(2)

    def test_one_episode(self):
        row = 5
        col = 5
        env = GobangEnv(row, col, 3)
        # 创建一个agent实例，输入超参数
        agent = QLearningAgent(
            env=env, gamma=0.5, e_greed=0.1)
        backtrack = [[0, 1, 1], [1, 1, 2], [0, 2, 1], [1, 2, 2], [0, 3, 1]]
        agent.imitativeLearning(backtrack, true)

    def test_removeAllItem(self):
        valueList = [1, 2, 3, 1, 5]
        oldvalueList = valueList
        valueList = removeAllItem(valueList, 1)
        self.assertEqual(valueList, [2, 3, 5])
        print(oldvalueList)

    def test_removeAllItem2(self):
        valueList = [0.00000000e+00, 0.00000000e+00,
                     0.00000000e+00, 1.86264515e-07, 0.00000000e+00]
        oldvalueList = valueList
        valueList = removeAllItem(valueList, 1.86264515e-07)
        self.assertEqual(valueList, [0.00000000e+00, 0.00000000e+00,
                                     0.00000000e+00, 0.00000000e+00])
        print(oldvalueList)

    def test_predict(self):
        return


if __name__ == '__main__':
    # Entry point: run the unit tests when executed as a script.
    unittest.main()
