import math
import matplotlib.pyplot as plt
import numpy as np
from qlearning_test.TSPEnv import TSPEnv


class QLearningAgent(object):
    """Tabular Q-learning agent for a TSP environment.

    States and actions are node indices. Nodes visited in the current
    episode are remembered and masked out during action selection so
    that each node is chosen at most once per tour.
    """

    def __init__(self, obs_n, act_n, epsilon=1, epsilon_min=0.01, epsilon_decay=0.999, learning_rate=0.8, gamma=0.9):
        self.act_n = act_n  # number of selectable actions (nodes)
        self.epsilon = epsilon  # exploration probability for epsilon-greedy
        self.epsilon_min = epsilon_min  # lower bound for epsilon
        self.epsilon_decay = epsilon_decay  # multiplicative decay applied per update
        self.lr = learning_rate  # learning rate
        self.gamma = gamma  # discount factor for future rewards
        self.Q = np.zeros((obs_n, act_n))  # Q table, shape (obs_n, act_n)
        self.states_memory = []  # nodes already visited in the current episode

    def act(self, obs):
        """Sample an action for state `obs`, epsilon-greedy with visited nodes masked."""
        q = np.copy(self.Q[obs, :])
        # Mask already-visited nodes so they can never be selected again.
        q[self.states_memory] = -np.inf
        if np.random.rand() > self.epsilon:
            # Exploit: best remaining action.
            action = np.argmax(q)
        else:
            # Explore: uniform choice among unvisited nodes.
            action = np.random.choice([x for x in range(self.act_n) if x not in self.states_memory])
        return action

    def train(self, obs, action, reward, next_obs, done):
        """One Q-learning update: Q(s,a) += lr * (td_target - Q(s,a)).

        Fixes two defects in the original update rule:
        * the bootstrap term must be the max over ALL actions of the next
          state (``self.Q[next_obs, :]``), not the single scalar entry
          ``self.Q[next_obs, action]``;
        * on terminal steps the current estimate must still be subtracted,
          i.e. the TD error is ``reward - Q(s,a)`` when ``done`` is True.
        """
        predict_Q = self.Q[obs, action]
        if done:
            target_Q = reward  # no next state to bootstrap from
        else:
            target_Q = reward + self.gamma * np.max(self.Q[next_obs, :])
        # Move the estimate toward the TD target by the learning rate.
        self.Q[obs, action] = predict_Q + self.lr * (target_Q - predict_Q)
        # Decay exploration toward epsilon_min after every update.
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay

    def remember_state(self, s):
        # Record a visited node so act() can mask it.
        self.states_memory.append(s)

    def reset_memory(self):
        # Clear the visited-node list at the start of a new episode.
        self.states_memory = []

    # Save the Q table to disk.
    def save(self):
        npy_file = './q_tsp_table.npy'
        np.save(npy_file, self.Q)
        print(npy_file + ' saved.')

    # Load a previously saved Q table from disk.
    def load(self, npy_file='./q_tsp_table.npy'):
        self.Q = np.load(npy_file)
        print(npy_file + ' loaded.')


# 训练一个epoch，每次都环境都重开
# Run one full training episode; the environment restarts each time.
def train_each_epoch(env, agent):
    """Play one episode, updating the agent after every step.

    Returns the accumulated (negated) reward for the episode; rewards are
    negated because the environment reports distances to be minimized.
    """
    total_reward = 0
    state = env.reset()  # reset() picks a random start node
    agent.reset_memory()

    done = False
    while not done:
        # Remember the current node so the agent masks it from now on.
        agent.remember_state(state)
        chosen = agent.act(state)
        # One interaction with the environment.
        new_state, step_reward, done = env.step(chosen)
        # Negate: we are minimizing total tour length.
        step_reward = -1 * step_reward
        # Update the Q table from this transition.
        agent.train(state, chosen, step_reward, new_state, done)
        total_reward += step_reward
        # Advance to the next state.
        state = new_state

    return total_reward


# 测试
def test(env, agent):
    epoch_reward = 0
    obs = env.reset()  # reset随机选取了一个起点
    agent.reset_memory()
    while True:
        # 存储已经走过的点
        agent.remember_state(obs)
        # 选择一个action
        action = agent.act(obs)
        # 与环境进行一个交互
        next_obs, reward, done = env.step(action)
        # 求的是最小值
        reward = -1 * reward
        # 更新总的回报
        epoch_reward = epoch_reward + reward
        # 将下一步付给这一步
        obs = next_obs

        if done:
            break
    return epoch_reward


# 获取距离矩阵
# Build the pairwise Euclidean distance matrix for a list of 2-D points.
def getDistMatrix(point_list):
    """Return an (n, n) ndarray of Euclidean distances, rounded to 2 decimals."""
    rows = [
        [round(math.sqrt((b[1] - a[1]) ** 2 + (b[0] - a[0]) ** 2), 2) for b in point_list]
        for a in point_list
    ]
    return np.asarray(rows)


def plotRewordFig(data):
    """Plot the recorded (epoch, reward) pairs as a red curve and show it."""
    pts = np.asarray(data)
    # Column 0 is the epoch index, column 1 the sampled reward; 'r' = red.
    plt.plot(pts[:, 0], pts[:, 1], c='r')
    plt.show()


def plotPositionFig(point_list, route_sequence):
    """Scatter the node coordinates with labels and draw the tour on top."""
    # Use a CJK-capable font so the node labels render, and keep minus signs.
    plt.rcParams['font.sans-serif'] = ['FangSong']
    plt.rcParams['axes.unicode_minus'] = False

    node_xs = [p[0] for p in point_list]
    node_ys = [p[1] for p in point_list]
    # Label every node at its own coordinates.
    for idx, (x, y) in enumerate(zip(node_xs, node_ys)):
        plt.annotate('点' + str(idx), (x, y))
    plt.scatter(node_xs, node_ys, c='y')

    # Draw the visiting order as a blue polyline.
    route_xs = [point_list[node_id][0] for node_id in route_sequence]
    route_ys = [point_list[node_id][1] for node_id in route_sequence]
    plt.plot(route_xs, route_ys, 'b')
    plt.show()


if __name__ == '__main__':
    # Seven node coordinates for the TSP instance.
    point_list = [
        [11, 12],
        [12, 13],
        [13, 14],
        [15, 18],
        [19, 10],
        [12, 17],
        [13, 9],
    ]
    n_points = len(point_list)
    dist_mat = getDistMatrix(point_list)
    print(dist_mat)

    env = TSPEnv(_points_num=n_points, _distance_matrix=dist_mat)
    agent = QLearningAgent(obs_n=n_points, act_n=n_points)

    # Train for 3000 episodes; sample the (negated-back) reward every 20
    # episodes for plotting the learning curve.
    reward_curve = []
    for episode in range(3000):
        episode_reward = train_each_epoch(env, agent)
        print(episode_reward)
        if episode % 20 == 0:
            reward_curve.append([episode, -episode_reward])

    # Inspect the learning curve.
    print(reward_curve)
    plotRewordFig(reward_curve)
    # Persist the learned Q table.
    agent.save()

    # Evaluate the trained agent on one episode and show the tour.
    final_reward = test(env, agent)
    print(final_reward)
    route_sequence_str, route_sequence = env.render()
    print(route_sequence)

    plotPositionFig(point_list, route_sequence)
