"""
记忆模块
1. 增：增加现有的记忆
2. 删：删除记忆，主要是记忆的遗忘
3. 改：改变记忆，可能是反思的结果
4. 查：检索记忆，对决策产生影响

注：
1. 需要在所有的操作完成之后对记忆进行保存
2. 在构建数据表时需要构建表头，为了便于检索
"""
import numpy as np
import csv
import pandas as pd


class MemoryModule:
    """Priority-based memory store backed by a CSV file.

    Supports the four basic operations described in the module docstring:
      * add    -- ``push`` new memories into an in-memory ring buffer
      * delete -- ``forget_by_*`` methods drop rows from the CSV file
      * update -- ``update_priorities`` rewrites buffered priorities
      * query  -- ``*_sample`` methods draw memories from the CSV file

    Notes:
        1. Sampling reads from the CSV file, so ``save_memory`` must be
           called after pushing for new memories to be sampled.
        2. The CSV file must already exist with a header row containing
           (at least) the ``time_stamps``, ``priority`` and ``importance``
           columns, in that naming.
    """

    def __init__(self, capacity, file_name, alpha=0.6):
        """
        Args:
            capacity: maximum number of memories held in the buffer
            file_name: path of the CSV file persisting the memories
            alpha: priority exponent used by priority-weighted sampling
        """
        # A memory's score is composed of three parts that together form
        # its priority:
        #   recency    -- how fresh the memory is; decays over time with
        #                 a fixed decay factor of 0.99
        #   importance -- how important the memory is, a number in [0, 10]
        #   relevance  -- cosine similarity between the memory's embedding
        #                 and the query embedding (not implemented yet)
        self.alpha = alpha
        self.recency = []
        self.recency_decay = 0.99
        self.importance = []     # importance of each buffered memory
        self.relevance = []      # currently unused

        self.capacity = capacity    # total buffer capacity
        self.buffer = []            # list holding the buffered memories
        self.pos = 0                # next write position in the ring buffer
        self.priorities = []        # priority of each buffered memory
        self.time_stamps = []       # timestamp of each buffered memory

        self.file_name = file_name              # path of the memory file
        self.timestamp_name = 'time_stamps'     # timestamp column name in the file
        self.priority_name = 'priority'         # priority column name in the file
        self.importance_name = 'importance'     # importance column name in the file

    def push(self, agent_id, memory_importance, memory_type, memory_data):
        """
        Store one memory, overwriting the oldest slot when the buffer is full.

        NOTE(review): the priority formula below is provisional and hard-coded;
        it will need revisiting.

        Args:
            agent_id: runtime id of the agent inserting the memory
            memory_importance: importance of the memory, expected in [0, 10]
            memory_type: type of the memory (used later during retrieval)
            memory_data: list carrying the memory payload
        """
        # The new timestamp continues from the largest timestamp already
        # persisted in the memory file (or from the in-memory buffer once
        # it holds anything).
        memory_df = pd.read_csv(self.file_name)
        time_value = memory_df[self.timestamp_name].values

        if len(self.time_stamps) == 0:
            if len(time_value) == 0:
                print("Time value is empty!")
                current_time = 1    # very first memory ever stored
            else:
                current_time = max(time_value) + 1
        else:
            current_time = max(self.time_stamps) + 1
        current_time = int(current_time)

        # Normalize recency against the oldest persisted timestamp.
        # TODO: dividing by capacity is a provisional normalization.
        min_time = min(time_value, default=0.0)
        time_norm = (current_time - min_time) / self.capacity

        # Importance must lie in [0, 10]; out-of-range values contribute
        # nothing to the priority.
        if 0 <= memory_importance <= 10:
            importance_norm = memory_importance / 10
        else:
            print("Importance out of scope! The value must be in range [0, 10]")
            importance_norm = 0

        # Priority is composed of the recency and importance terms.
        prio = time_norm + importance_norm

        record = [agent_id, memory_type, memory_importance, current_time, prio] + memory_data
        if len(self.buffer) < self.capacity:
            self.buffer.append(record)
            self.priorities.append(prio)
            self.time_stamps.append(current_time)
            self.importance.append(memory_importance)
        else:
            # Buffer full: overwrite the slot at the ring-buffer cursor.
            self.buffer[self.pos] = record
            self.priorities[self.pos] = prio
            self.time_stamps[self.pos] = current_time
            self.importance[self.pos] = memory_importance

        self.pos = (self.pos + 1) % self.capacity   # advance the cursor

    def uniform_sample(self, batch_size):
        """
        Uniformly sample memories without replacement (every persisted
        memory is equally likely). Reads from the CSV file, so memories
        must be saved before sampling.

        Args:
            batch_size: number of memories to draw; must not exceed the
                number of rows persisted in the file

        Returns:
            list of row arrays, one per sampled memory
        """
        memory_df = pd.read_csv(self.file_name)
        memory_value = memory_df.values
        indices = np.random.choice(memory_df.shape[0], size=batch_size, replace=False)
        return [memory_value[idx] for idx in indices]

    def time_weighted_sample(self, batch_size):
        """
        Recency-weighted sampling: recent memories are favoured via an
        exponential time decay. Reads from the CSV file, so memories must
        be saved before sampling.

        Args:
            batch_size: number of memories to draw (with replacement)

        Returns:
            list of row arrays, one per sampled memory
        """
        memory_df = pd.read_csv(self.file_name)
        memory_value = memory_df.values
        time_stamps = memory_df[self.timestamp_name].values

        # decay ** (age) gives the newest memory weight 1; normalize to a
        # probability distribution.
        weights = self.recency_decay ** (time_stamps.max() - time_stamps)
        weights = weights / weights.sum()
        indices = np.random.choice(len(time_stamps), size=batch_size, p=weights)
        return [memory_value[idx] for idx in indices]

    def priority_weighted_sample(self, batch_size):
        """
        Priority-based sampling using the priorities stored in the file.
        Reads from the CSV file, so memories must be saved before sampling.

        Args:
            batch_size: number of memories to draw (with replacement)

        Returns:
            list of row arrays, one per sampled memory
        """
        memory_df = pd.read_csv(self.file_name)
        memory_value = memory_df.values
        priorities = memory_df[self.priority_name].values

        # p_i ** alpha / sum_j p_j ** alpha, as in prioritized replay.
        # (Multiplying by alpha, as before, cancelled out after
        # normalization and had no effect on the distribution.)
        scaled = priorities ** self.alpha
        probs = scaled / scaled.sum()
        indices = np.random.choice(memory_df.shape[0], size=batch_size, p=probs)
        return [memory_value[idx] for idx in indices]

    def update_priorities(self, indices, priorities):
        """
        Update the priorities of buffered memories.

        Args:
            indices: buffer indices of the memories to update
            priorities: new priority values, aligned with ``indices``
        """
        for idx, prio in zip(indices, priorities):
            self.priorities[idx] = prio

    def save_memory(self):
        """
        Append every buffered memory as a row of the CSV file.

        NOTE(review): the buffer is not cleared afterwards, so calling this
        twice writes duplicate rows -- confirm whether that is intended.
        """
        with open(self.file_name, 'a', newline='') as file:
            print("writing...")
            csv.writer(file).writerows(self.buffer)

        # After saving, the total stored volume should be re-checked, e.g.:
        # self.sort_memory(sort_name)
        # self.forget_by_capacity()

    def sort_memory(self, sort_name, ascending=False):
        """
        Sort the memory file by a column, descending by default. Best done
        before capacity enforcement, since ``forget_by_capacity`` deletes
        the rows past the capacity cut-off.

        Args:
            sort_name: column name to sort by
            ascending: sort ascending when True (default False)
        """
        memory_df = pd.read_csv(self.file_name)
        memory_df = memory_df.sort_values(by=sort_name, ascending=ascending)
        memory_df.to_csv(self.file_name, encoding='utf-8', index=False)
        print("sorting...")

    def forget_by_capacity(self):
        """
        Capacity-based forgetting: when the file holds more rows than
        ``capacity``, drop everything past the first ``capacity`` rows.
        """
        memory_df = pd.read_csv(self.file_name)
        if self.capacity >= memory_df.shape[0]:
            return
        print("drop memory by capacity!")
        memory_df.drop(memory_df.index[self.capacity:], inplace=True)
        memory_df.to_csv(self.file_name, encoding='utf-8', index=False)

    def forget_by_time(self, time_threshold):
        """
        Time-based forgetting: drop memories with timestamp <= threshold,
        then rebase the surviving timestamps by subtracting the threshold.

        Args:
            time_threshold: timestamps at or below this value are forgotten
        """
        memory_df = pd.read_csv(self.file_name)
        time_value = memory_df[self.timestamp_name].values

        if time_threshold > max(time_value):
            print("Time threshold out of scope!")
        else:
            memory_df = memory_df.drop(
                memory_df[memory_df[self.timestamp_name] <= time_threshold].index)
            # Rebase the surviving timestamps in one vectorized step. (The
            # previous per-row loop iterated over the PRE-drop row count and
            # indexed past the end of the shortened frame.)
            memory_df[self.timestamp_name] -= time_threshold
            memory_df.to_csv(self.file_name, encoding='utf-8', index=False)

    def forget_by_priority(self, priority_threshold):
        """
        Priority-based forgetting: drop memories whose priority is at or
        below the threshold.

        Args:
            priority_threshold: priorities at or below this are forgotten
        """
        memory_df = pd.read_csv(self.file_name)
        priority_value = memory_df[self.priority_name].values

        if priority_threshold > max(priority_value):
            print("Priority threshold out of scope!")
        else:
            # Use the configured column name (was hard-coded 'priority').
            memory_df = memory_df.drop(
                memory_df[memory_df[self.priority_name] <= priority_threshold].index)
            memory_df.to_csv(self.file_name, encoding='utf-8', index=False)

    def __len__(self):
        """
        Return the number of memories currently buffered.
        """
        return len(self.buffer)
