import collections
import random
import time

import numpy as np
import torch
from matplotlib import pyplot as plt


class ReplayBuffer:
    """Fixed-capacity FIFO store of transitions with uniform random sampling."""

    def __init__(self, capacity):
        # deque with maxlen silently drops the oldest transition once full
        self.buffer = collections.deque(maxlen=capacity)

    def add(self, state, action, reward, next_state, done):
        """Append one (state, action, reward, next_state, done) transition."""
        transition = (state, action, reward, next_state, done)
        self.buffer.append(transition)

    def sample(self, batch_size):
        """Draw `batch_size` transitions uniformly without replacement.

        Returns (states, actions, rewards, next_states, dones); states,
        actions and next_states are stacked into numpy arrays while rewards
        and dones stay as tuples.
        """
        picked = random.sample(self.buffer, batch_size)
        states, actions, rewards, next_states, dones = zip(*picked)
        return (np.array(states), np.array(actions), rewards,
                np.array(next_states), dones)

    def size(self):
        """Number of transitions currently held."""
        return len(self.buffer)


def train_agent(env, agent, num_steps, replay_buffer, minimal_size, batch_size):
    """Train `agent` on `env` with a uniform experience replay buffer.

    Interacts episode by episode, storing each transition in `replay_buffer`;
    once the buffer holds more than `minimal_size` transitions, performs one
    agent update per environment step until `num_steps` updates are done.

    :param env: environment with reset()/step() (tuple-returning API — see notes)
    :param agent: must provide take_action, update, save_model and a `name` attr
    :param num_steps: total number of gradient-update steps to perform
    :param replay_buffer: ReplayBuffer-like object (add / size / sample)
    :param minimal_size: updates start once the buffer exceeds this size
    :param batch_size: number of transitions drawn per update
    :return: list of per-episode returns
    """
    return_list = []

    current_steps = 0  # gradient-update steps performed so far
    current_episode = 0  # completed episodes (rounds of env interaction)

    start_time = time.time()  # training start, used for the ETA report
    print_interval = 1000  # print progress every this many update steps

    while current_steps < num_steps:
        episode_return = 0
        state = env.reset()
        # NOTE(review): assumes reset() returns (observation, info) — gymnasium-style.
        state = state[0].tolist()
        done = False

        while not done:
            action = agent.take_action(state)
            result = env.step(action)
            next_state = result[0].tolist()
            reward = result[1]
            # NOTE(review): only result[2] (terminated) is read; if the env uses
            # the 5-tuple gymnasium API, truncation (result[3]) is ignored and
            # time-limited episodes may never set done — confirm the env wrapper.
            done = result[2]

            replay_buffer.add(state, action, reward, next_state, done)
            state = next_state
            episode_return += reward

            # Start learning only once the buffer has enough transitions.
            if replay_buffer.size() > minimal_size:
                b_s, b_a, b_r, b_ns, b_d = replay_buffer.sample(batch_size)
                transition_dict = {'states': b_s, 'actions': b_a, 'next_states': b_ns, 'rewards': b_r, 'dones': b_d}
                agent.update(transition_dict)
                current_steps += 1

                if current_steps % print_interval == 0:
                    time_cost = (time.time() - start_time)/60 # minutes elapsed so far

                    speed = current_steps/time_cost # training speed (steps/minute)

                    remaining_time = (num_steps-current_steps)/speed  # ETA in minutes

                    print(f"{agent.name}:当前步数: {current_steps}/{num_steps}, 已用时间: {time_cost:.2f} 分钟,"
                          f" 预计剩余时间: {remaining_time:.2f} 分钟")

        current_episode += 1
        return_list.append(episode_return)

        # Periodic checkpoint: every 2000 episodes, plus a one-off at episode 200.
        if current_episode % 2000 == 0 or current_episode == 200:
            print(f'{agent.name}第{current_episode}轮的得分是：{episode_return}')
            agent.save_model(f'result/{agent.name}第{current_episode}轮训练结果_该轮分数{int(episode_return)}.parameter')

    agent.save_model(f'result/{agent.name}最终训练结果.parameter')

    return return_list


def plot_lines(y_values1, y_values2, label1='SAC', label2='SAC_PER', clip=False):
    """Draw two return curves on one figure, save it, then display it.

    Parameters:
    y_values1 (list): The y-values of the first line.
    y_values2 (list): The y-values of the second line.
    label1 (str): The label for the first line.
    label2 (str): The label for the second line.
    clip (bool): If True, clip the longer list to the length of the shorter list.
    """
    # Optionally truncate both series to their common length.
    if clip:
        shared_len = min(len(y_values1), len(y_values2))
        y_values1 = y_values1[:shared_len]
        y_values2 = y_values2[:shared_len]

    # The x-axis is simply the episode index of each series.
    for ys, lab in ((y_values1, label1), (y_values2, label2)):
        plt.plot(list(range(len(ys))), ys, label=lab)

    plt.legend()

    # Axis labels
    plt.xlabel('Train Episode')
    plt.ylabel('Return')

    # Persist the figure before showing it.
    plt.savefig('result/compare_fig.png')
    plt.show()


def smooth_and_plot(y_values, n1, n2):
    """Apply two successive moving-average passes to `y_values` and plot them.

    The first pass (window `n1`) is drawn as a shaded area; the second pass
    (window `n2`, applied to the first pass's output) is drawn as a line.
    """
    def moving_average(series, window):
        """Centered moving average; even windows are widened to the next odd size."""
        if window % 2 == 0:
            window += 1
        half = window // 2
        averaged = [
            np.mean(series[max(0, i - half):min(len(series), i + half + 1)])
            for i in range(len(series))
        ]
        return np.array(averaged)

    pass1 = moving_average(y_values, n1)   # first smoothing pass
    pass2 = moving_average(pass1, n2)      # second pass on the first's output

    plt.figure(figsize=(10, 5))

    # Shaded region built from the first smoothing pass.
    plt.fill_between(
        range(len(pass1)),
        pass1,
        color='blue',
        alpha=0.2,
        label='Original Data'
    )

    # Line built from the second smoothing pass.
    plt.plot(pass2, label='Smoothed Data', color='red')

    plt.legend()
    plt.xlabel('Train Episode')
    plt.ylabel('Return')
    plt.show()


#=========================================下面的部分为实现优先经验回放=============================

class SumTree:
    """Flat-array binary sum-tree of priorities for prioritized replay.

    A complete binary tree with `capacity` leaves is stored in `self.tree`:
    indices [0, capacity-1) are internal nodes, each holding the sum of its
    two children's priorities; indices [capacity-1, 2*capacity-1) are leaves
    holding individual priorities.  `self.data` stores each leaf's payload,
    written circularly so the oldest entry is overwritten when full.
    """

    def __init__(self, capacity: int):
        self.capacity = capacity   # maximum number of stored items
        self.data_pointer = 0      # next write slot in self.data (circular)
        self.n_entries = 0         # number of items currently stored
        # A complete binary tree with n leaves has 2n - 1 nodes in total.
        self.tree = np.zeros(2 * capacity - 1)
        self.data = np.zeros(capacity, dtype=object)  # stored payloads

    def update(self, tree_idx, p):
        """Set the priority at `tree_idx` to `p` and propagate the delta to the root.

        :param tree_idx: index (into self.tree) of the node to update
        :param p: new priority value
        """
        change = p - self.tree[tree_idx]  # delta = new - old
        self.tree[tree_idx] = p
        # Walk up to the root, adding the delta to every ancestor's sum.
        while tree_idx != 0:
            tree_idx = (tree_idx - 1) // 2  # parent index
            self.tree[tree_idx] += change

    def add(self, p, data):
        """Insert `data` with priority `p`, overwriting the oldest entry when full.

        :param p: priority of the new item
        :param data: the item itself
        """
        # Data slot `data_pointer` maps to leaf `capacity - 1 + data_pointer`
        # (internal nodes occupy the first capacity - 1 tree slots).
        tree_idx = self.data_pointer + self.capacity - 1
        self.data[self.data_pointer] = data
        self.update(tree_idx, p)

        self.data_pointer += 1
        if self.data_pointer >= self.capacity:
            self.data_pointer = 0  # wrap: subsequent adds overwrite oldest entries

        if self.n_entries < self.capacity:
            self.n_entries += 1

    def get_leaf(self, v):
        """Find the leaf whose cumulative-priority interval contains `v`.

        Descends from the root: go left when `v` fits inside the left
        subtree's mass, otherwise subtract that mass and go right.

        :returns: (leaf index into self.tree, its priority, its stored data)
        """
        parent_idx = 0  # start at the root
        while True:
            cl_idx = 2 * parent_idx + 1  # left child index
            cr_idx = cl_idx + 1          # right child index
            if cl_idx >= len(self.tree):
                # No children exist, so parent_idx is a leaf.
                leaf_idx = parent_idx
                break
            if v <= self.tree[cl_idx]:
                # Target mass lies within the left subtree.
                parent_idx = cl_idx
            else:
                # Skip past the left subtree's mass and descend right.
                v -= self.tree[cl_idx]
                parent_idx = cr_idx

        data_idx = leaf_idx - self.capacity + 1
        return leaf_idx, self.tree[leaf_idx], self.data[data_idx]

    def total(self):
        """Total priority mass (sum over all leaves), i.e. the root value.

        Bug fix: the previous version truncated this with int().  Priorities
        are fractional, so truncation skewed the sampling-segment boundaries
        and returned 0 whenever the true total was below 1, collapsing all
        samples onto the first leaf.
        """
        return self.tree[0]


class ReplayTree:
    """Prioritized experience replay buffer built on a SumTree.

    Transitions are stored with priority (|TD error| + epsilon) ** alpha and
    sampled proportionally to priority; importance-sampling (IS) weights
    correct the induced bias, with the exponent `beta` annealed towards 1.
    """

    def __init__(self, capacity):
        self.capacity = capacity  # maximum number of stored transitions
        self.tree = SumTree(capacity)

        # Hyper-parameters for priority computation and IS weights.
        # (Bug fix: the duplicate `abs_err_upper` assignment was removed.)
        self.beta_increment_per_sampling = 0.001  # beta annealing step per sample() call
        self.alpha = 0.6      # how strongly TD error shapes the sampling distribution
        self.beta = 0.4       # initial IS-weight exponent, annealed towards 1
        self.epsilon = 0.01   # keeps every priority strictly positive
        self.abs_err_upper = 1.  # clip ceiling for |TD errors| in batch_update

    def __len__(self):
        """Return the number of transitions currently stored."""
        return self.tree.n_entries

    def push(self, error, sample):
        """Store `sample` with a priority derived from its TD `error`."""
        p = (np.abs(error) + self.epsilon) ** self.alpha
        self.tree.add(p, sample)

    def sample(self, batch_size):
        """Draw `batch_size` transitions via stratified priority sampling.

        The total priority mass is split into `batch_size` equal segments and
        one value is drawn uniformly from each, so samples cover the whole
        priority range.

        :returns: (zipped transition fields, tree indices, IS weights)
        """
        pri_segment = self.tree.total() / batch_size

        priorities = []
        batch = []
        idxs = []

        # Anneal beta towards 1 (full bias correction by end of training).
        self.beta = np.min([1., self.beta + self.beta_increment_per_sampling])

        for i in range(batch_size):
            a = pri_segment * i
            b = pri_segment * (i + 1)

            s = random.uniform(a, b)
            idx, p, data = self.tree.get_leaf(s)

            # Unfilled SumTree slots hold the int 0; skip them so the batch
            # only contains real transitions.
            if data != 0:
                priorities.append(p)
                batch.append(data)
                idxs.append(idx)

        # Per-sample selection probability under the priority distribution.
        sampling_probabilities = np.array(priorities) / self.tree.total()
        is_weights = np.power(self.tree.n_entries * sampling_probabilities, -self.beta)
        is_weights /= is_weights.max()  # normalize so the largest weight is 1

        return zip(*batch), idxs, is_weights

    def batch_update(self, tree_idx, abs_errors):
        """Refresh the priorities of the given tree nodes from new |TD errors|.

        Bug fix: the previous `abs_errors += self.epsilon` mutated the
        caller's array in place when a numpy array was passed; this version
        uses an out-of-place addition.
        """
        abs_errors = abs_errors + self.epsilon
        clipped_errors = np.minimum(abs_errors, self.abs_err_upper)
        ps = np.power(clipped_errors, self.alpha)

        for ti, p in zip(tree_idx, ps):
            self.tree.update(ti, p)

def train_agent_PER(env, agent, num_steps, minimal_size):
    """Train `agent` on `env` using prioritized experience replay (PER).

    Like train_agent, but each transition's initial priority is derived from
    its TD error before it is pushed into `agent.memory`, and the agent
    samples from its own memory inside agent.update().

    :param env: environment with reset()/step() (tuple-returning API — see notes)
    :param agent: must provide take_action, update, save_model, calc_target,
                  critic_1, critic_2, memory, device and a `name` attr
    :param num_steps: total number of gradient-update steps to perform
    :param minimal_size: updates start once memory holds more than this many items
    :return: list of per-episode returns
    """
    return_list = []

    current_steps = 0  # gradient-update steps performed so far
    current_episode = 0  # completed episodes
    start_time = time.time()  # training start, used for the ETA report
    print_interval = 1000  # print progress every this many update steps

    while current_steps < num_steps:
        episode_return = 0
        state = env.reset()
        # NOTE(review): assumes reset() returns (observation, info) — gymnasium-style.
        state = state[0].tolist()
        done = False

        while not done:
            action = agent.take_action(state)
            result = env.step(action)
            next_state = result[0].tolist()
            reward = result[1]
            # NOTE(review): only result[2] (terminated) is read; if the env uses
            # the 5-tuple gymnasium API, truncation (result[3]) is ignored and
            # time-limited episodes may never set done — confirm the env wrapper.
            done = result[2]

            # Compute the TD error used as the transition's initial priority.
            with torch.no_grad():
                action_tensor = torch.tensor([action], dtype=torch.float).to(agent.device)
                state_tensor = torch.tensor([state], dtype=torch.float).to(agent.device)
                next_state_tensor = torch.tensor([next_state], dtype=torch.float).to(agent.device)

                # Current Q estimates from the twin critics.
                # NOTE(review): presumably a SAC-style agent; critic/target
                # semantics cannot be confirmed from this file.
                q1_value = agent.critic_1(state_tensor, action_tensor)
                q2_value = agent.critic_2(state_tensor, action_tensor)

                # Bootstrapped target Q value from the agent's own target computation.
                target = agent.calc_target(
                    torch.tensor([reward], dtype=torch.float).to(agent.device),
                    next_state_tensor,
                    torch.tensor([done], dtype=torch.float).to(agent.device),
                )

                # TD error = mean of the two critics' absolute errors vs the target.
                td_error = (0.5 * ((q1_value - target).abs() + (q2_value - target).abs())).cpu().item()

            # Push the transition into the prioritized replay memory.
            agent.memory.push(td_error, (state, action, reward, next_state, done))
            state = next_state
            episode_return += reward

            # Start learning only once memory has enough transitions.
            if len(agent.memory) > minimal_size:
                agent.update()
                current_steps += 1

                if current_steps % print_interval == 0:
                    time_cost = (time.time() - start_time) / 60  # minutes elapsed so far
                    speed = current_steps / time_cost  # training speed (steps/minute)
                    remaining_time = (num_steps - current_steps) / speed  # ETA in minutes

                    print(f"{agent.name}:当前步数: {current_steps}/{num_steps}, 已用时间: {time_cost:.2f} 分钟, "
                          f"预计剩余时间: {remaining_time:.2f} 分钟")

        current_episode += 1
        return_list.append(episode_return)

        # Periodic checkpoint: every 2000 episodes, plus a one-off at episode 200.
        if current_episode % 2000 == 0 or current_episode == 200:
            print(f'{agent.name}第{current_episode}轮的得分是：{episode_return}')
            agent.save_model(f'result/{agent.name}第{current_episode}轮训练结果_该轮分数{int(episode_return)}.parameter')

    # Save the final model.
    agent.save_model(f'result/{agent.name}最终训练结果.parameter')

    return return_list

def save_results_to_file(results, filename):
    """Write `results` to `filename` in NumPy .npy binary format."""
    np.save(filename, results)

def load_results_from_file(filename):
    """Load an array previously written with save_results_to_file."""
    return np.load(filename)