# 导入所需的库
import json                      # 用于处理JSON格式数据
import pickle                    # 用于序列化和反序列化Python对象
import struct                    # 用于处理二进制数据
from pathlib import Path         # 用于处理文件路径

import numpy as np               # 用于数值计算
import torch                     # 用于深度学习框架
from tqdm import tqdm            # 用于显示进度条


class MyDataset(torch.utils.data.Dataset):
    """
    User behavior sequence dataset (training).

    Each row of ``seq.jsonl`` holds one user's interaction sequence; a
    precomputed byte-offset table allows O(1) random access by user
    re-id. ``__getitem__`` returns left-padded id/feature arrays plus
    sampled positive/negative next items for next-item training.
    """

    def __init__(self, data_dir, args):
        """
        Args:
            data_dir: directory holding seq.jsonl, seq_offsets.pkl,
                item_feat_dict.json, indexer.pkl and creative_emb/.
            args: global arguments; must provide ``maxlen`` (max sequence
                length) and ``mm_emb_id`` (multimodal embedding feature ids).
        """
        super().__init__()
        self.data_dir = Path(data_dir)
        self._load_data_and_offsets()
        self.maxlen = args.maxlen
        self.mm_emb_ids = args.mm_emb_id

        # Item re-id (as str) -> feature dict. Use a context manager so
        # the handle is closed (the previous json.load(open(...)) leaked it).
        with open(Path(data_dir, "item_feat_dict.json"), 'r') as f:
            self.item_feat_dict = json.load(f)
        # feat_id -> {raw creative id: embedding vector}
        self.mm_emb_dict = load_mm_emb(Path(data_dir, "creative_emb"), self.mm_emb_ids)

        # Indexer: raw-id -> re-id maps for items ('i'), users ('u') and
        # per-feature value vocabularies ('f').
        with open(self.data_dir / 'indexer.pkl', 'rb') as ff:
            indexer = pickle.load(ff)
            self.itemnum = len(indexer['i'])  # number of distinct items
            self.usernum = len(indexer['u'])  # number of distinct users

        # Reverse maps: re-id -> raw id.
        self.indexer_i_rev = {v: k for k, v in indexer['i'].items()}
        self.indexer_u_rev = {v: k for k, v in indexer['u'].items()}
        self.indexer = indexer

        # Per-feature defaults, type grouping and vocabulary sizes.
        self.feature_default_value, self.feature_types, self.feat_statistics = self._init_feat_info()

    def _load_data_and_offsets(self):
        """
        Open the sequence file and load the per-user byte offsets used
        for fast random access.

        NOTE(review): self.data_file stays open for the dataset's
        lifetime and is shared by every __getitem__ call — presumably
        unsafe with DataLoader num_workers > 0 unless each worker gets
        its own copy; confirm against the training setup.
        """
        self.data_file = open(self.data_dir / "seq.jsonl", 'rb')
        with open(Path(self.data_dir, 'seq_offsets.pkl'), 'rb') as f:
            # uid -> byte offset of that user's line in seq.jsonl
            self.seq_offsets = pickle.load(f)

    def _load_user_data(self, uid):
        """
        Load one user's raw sequence from the JSONL file.

        Args:
            uid: user re-id.

        Returns:
            List of records shaped
            (user_id, item_id, user_feat, item_feat, action_type, timestamp).
        """
        self.data_file.seek(self.seq_offsets[uid])
        line = self.data_file.readline()
        return json.loads(line)

    def _random_neq(self, l, r, s):
        """
        Sample a random item id in [l, r) for negative sampling.

        The candidate must not appear in ``s`` (items the user already
        interacted with) and must have an entry in item_feat_dict.
        NOTE(review): loops forever if no valid candidate exists in
        [l, r) — assumed unreachable with real data.

        Args:
            l: inclusive lower bound.
            r: exclusive upper bound.
            s: set of item ids to avoid.

        Returns:
            A valid negative item id.
        """
        t = np.random.randint(l, r)
        while t in s or str(t) not in self.item_feat_dict:
            t = np.random.randint(l, r)
        return t

    def __getitem__(self, uid):
        """
        Build one training sample for user ``uid``.

        Args:
            uid: user re-id.

        Returns:
            Tuple (seq, pos, neg, token_type, next_token_type,
            next_action_type, seq_feat, pos_feat, neg_feat); the id/type
            arrays are int32 of length maxlen + 1, the *_feat arrays are
            object arrays holding per-position feature dicts.
        """
        user_sequence = self._load_user_data(uid)

        # Flatten each record into user tokens (type 2, pushed to the
        # front) and item tokens (type 1, appended in order).
        ext_user_sequence = []
        for record_tuple in user_sequence:
            u, i, user_feat, item_feat, action_type, _ = record_tuple
            if u and user_feat:
                ext_user_sequence.insert(0, (u, user_feat, 2, action_type))
            if i and item_feat:
                ext_user_sequence.append((i, item_feat, 1, action_type))

        seq = np.zeros([self.maxlen + 1], dtype=np.int32)         # input token ids
        pos = np.zeros([self.maxlen + 1], dtype=np.int32)         # positive (next) item ids
        neg = np.zeros([self.maxlen + 1], dtype=np.int32)         # sampled negative item ids
        token_type = np.zeros([self.maxlen + 1], dtype=np.int32)  # 1 = item, 2 = user
        next_token_type = np.zeros([self.maxlen + 1], dtype=np.int32)   # type of successor token
        next_action_type = np.zeros([self.maxlen + 1], dtype=np.int32)  # action of successor token

        # object dtype so each slot can hold a feature dict
        seq_feat = np.empty([self.maxlen + 1], dtype=object)
        pos_feat = np.empty([self.maxlen + 1], dtype=object)
        neg_feat = np.empty([self.maxlen + 1], dtype=object)

        # NOTE(review): assumes ext_user_sequence is non-empty; a user
        # record with no usable tokens would raise IndexError here.
        nxt = ext_user_sequence[-1]
        idx = self.maxlen  # fill right-to-left (left padding)

        # Items this user interacted with (excluded from negatives).
        ts = set()
        for record_tuple in ext_user_sequence:
            if record_tuple[2] == 1 and record_tuple[0]:
                ts.add(record_tuple[0])

        # Walk the sequence backwards, pairing each token with its
        # successor to form (input, target) training positions.
        for record_tuple in reversed(ext_user_sequence[:-1]):
            i, feat, type_, act_type = record_tuple
            next_i, next_feat, next_type, next_act_type = nxt

            feat = self.fill_missing_feat(feat, i)
            next_feat = self.fill_missing_feat(next_feat, next_i)

            seq[idx] = i
            token_type[idx] = type_
            next_token_type[idx] = next_type
            if next_act_type is not None:
                next_action_type[idx] = next_act_type
            seq_feat[idx] = feat

            # Only item successors yield a (pos, neg) training target.
            if next_type == 1 and next_i != 0:
                pos[idx] = next_i
                pos_feat[idx] = next_feat
                neg_id = self._random_neq(1, self.itemnum + 1, ts)
                neg[idx] = neg_id
                neg_feat[idx] = self.fill_missing_feat(self.item_feat_dict[str(neg_id)], neg_id)

            nxt = record_tuple
            idx -= 1
            if idx == -1:
                break

        # Replace untouched (None) slots with the default feature dict
        # (elementwise == None is intentional on these object arrays).
        seq_feat = np.where(seq_feat == None, self.feature_default_value, seq_feat)
        pos_feat = np.where(pos_feat == None, self.feature_default_value, pos_feat)
        neg_feat = np.where(neg_feat == None, self.feature_default_value, neg_feat)

        return seq, pos, neg, token_type, next_token_type, next_action_type, seq_feat, pos_feat, neg_feat

    def __len__(self):
        """
        Returns:
            Dataset size (number of users with an offset entry).
        """
        return len(self.seq_offsets)

    def _init_feat_info(self):
        """
        Build per-feature metadata.

        Returns:
            feat_default_value: feat_id -> default value used for padding.
            feat_types: category name -> list of feat_ids in that category.
            feat_statistics: feat_id -> vocabulary size (sparse/array only).
        """
        feat_default_value = {}
        feat_statistics = {}
        feat_types = {}

        # Hard-coded feature-id grouping for this dataset release.
        feat_types['user_sparse'] = ['103', '104', '105', '109']
        feat_types['item_sparse'] = [
            '100', '117', '111', '118', '101', '102', '119', '120',
            '114', '112', '121', '115', '122', '116',
        ]
        feat_types['item_array'] = []
        feat_types['user_array'] = ['106', '107', '108', '110']
        feat_types['item_emb'] = self.mm_emb_ids
        feat_types['user_continual'] = []
        feat_types['item_continual'] = []

        # Sparse features default to id 0; array features to [0];
        # continual features to 0; embeddings to an all-zero vector.
        for feat_id in feat_types['user_sparse']:
            feat_default_value[feat_id] = 0
            feat_statistics[feat_id] = len(self.indexer['f'][feat_id])
        for feat_id in feat_types['item_sparse']:
            feat_default_value[feat_id] = 0
            feat_statistics[feat_id] = len(self.indexer['f'][feat_id])
        for feat_id in feat_types['item_array']:
            feat_default_value[feat_id] = [0]
            feat_statistics[feat_id] = len(self.indexer['f'][feat_id])
        for feat_id in feat_types['user_array']:
            feat_default_value[feat_id] = [0]
            feat_statistics[feat_id] = len(self.indexer['f'][feat_id])
        for feat_id in feat_types['user_continual']:
            feat_default_value[feat_id] = 0
        for feat_id in feat_types['item_continual']:
            feat_default_value[feat_id] = 0
        for feat_id in feat_types['item_emb']:
            # Dimension taken from any loaded embedding of this feature.
            feat_default_value[feat_id] = np.zeros(
                list(self.mm_emb_dict[feat_id].values())[0].shape[0], dtype=np.float32
            )

        return feat_default_value, feat_types, feat_statistics

    def fill_missing_feat(self, feat, item_id):
        """
        Return a copy of ``feat`` with every known feature id present:
        missing ids are filled with defaults, and the item's multimodal
        embedding is attached from mm_emb_dict when available.

        Args:
            feat: feature dict (may be None).
            item_id: item re-id (0 means padding / not an item).

        Returns:
            Completed feature dict.
        """
        if feat is None:
            feat = {}
        filled_feat = dict(feat)

        # Every feature id known to the dataset.
        all_feat_ids = []
        for feat_type in self.feature_types.values():
            all_feat_ids.extend(feat_type)

        # Fill ids absent from this record with their defaults.
        for feat_id in set(all_feat_ids) - set(feat.keys()):
            filled_feat[feat_id] = self.feature_default_value[feat_id]

        # Attach multimodal embeddings keyed by the raw item id.
        for feat_id in self.feature_types['item_emb']:
            if item_id != 0 and self.indexer_i_rev[item_id] in self.mm_emb_dict[feat_id]:
                if isinstance(self.mm_emb_dict[feat_id][self.indexer_i_rev[item_id]], np.ndarray):
                    filled_feat[feat_id] = self.mm_emb_dict[feat_id][self.indexer_i_rev[item_id]]

        return filled_feat

    @staticmethod
    def collate_fn(batch):
        """
        Collate samples into a batch: id/type arrays are stacked into
        torch tensors, feature dict arrays stay as Python lists.

        Args:
            batch: list of __getitem__ outputs.

        Returns:
            Batched (seq, pos, neg, token_type, next_token_type,
            next_action_type, seq_feat, pos_feat, neg_feat).
        """
        seq, pos, neg, token_type, next_token_type, next_action_type, seq_feat, pos_feat, neg_feat = zip(*batch)

        seq = torch.from_numpy(np.array(seq))
        pos = torch.from_numpy(np.array(pos))
        neg = torch.from_numpy(np.array(neg))
        token_type = torch.from_numpy(np.array(token_type))
        next_token_type = torch.from_numpy(np.array(next_token_type))
        next_action_type = torch.from_numpy(np.array(next_action_type))

        # Feature dicts cannot be stacked into tensors; keep lists.
        seq_feat = list(seq_feat)
        pos_feat = list(pos_feat)
        neg_feat = list(neg_feat)

        return seq, pos, neg, token_type, next_token_type, next_action_type, seq_feat, pos_feat, neg_feat

class MyTestDataset(MyDataset):
    """
    Test-time dataset: reads ``predict_seq.jsonl`` and yields
    (seq, token_type, seq_feat, user_id) without pos/neg sampling.
    """

    def __init__(self, data_dir, args):
        """
        Args:
            data_dir: directory with predict_seq.jsonl and
                predict_seq_offsets.pkl (plus the shared training assets).
            args: global arguments (see MyDataset).
        """
        super().__init__(data_dir, args)

    def _load_data_and_offsets(self):
        """Override: open the prediction sequence file and its offsets."""
        self.data_file = open(self.data_dir / "predict_seq.jsonl", 'rb')
        with open(Path(self.data_dir, 'predict_seq_offsets.pkl'), 'rb') as f:
            self.seq_offsets = pickle.load(f)

    def _process_cold_start_feat(self, feat):
        """
        Handle cold-start feature values: values unseen during training
        arrive as raw strings instead of re-ids, so every string value
        (scalar or inside a list) is mapped to 0.

        Args:
            feat: raw feature dict.

        Returns:
            Feature dict with string values replaced by 0.
        """
        processed_feat = {}
        for feat_id, feat_value in feat.items():
            if isinstance(feat_value, list):
                processed_feat[feat_id] = [0 if isinstance(v, str) else v for v in feat_value]
            elif isinstance(feat_value, str):
                processed_feat[feat_id] = 0
            else:
                processed_feat[feat_id] = feat_value
        return processed_feat

    def __getitem__(self, uid):
        """
        Build the model input for one test user.

        Args:
            uid: user re-id (index into the offsets table).

        Returns:
            seq: left-padded int32 id sequence of length maxlen + 1.
            token_type: 1 for item tokens, 2 for the user token.
            seq_feat: per-position feature dicts (object array).
            user_id: the raw (original) user id.
        """
        user_sequence = self._load_user_data(uid)

        ext_user_sequence = []
        for record_tuple in user_sequence:
            u, i, user_feat, item_feat, _, _ = record_tuple
            if u:
                # A str is already the raw user_id; an int is a re-id
                # that must be mapped back through the reverse indexer.
                user_id = u if isinstance(u, str) else self.indexer_u_rev[u]
            if u and user_feat:
                if isinstance(u, str):
                    # Cold-start user: no re-id exists, use 0.
                    u = 0
                user_feat = self._process_cold_start_feat(user_feat)
                ext_user_sequence.insert(0, (u, user_feat, 2))

            if i and item_feat:
                if i > self.itemnum:
                    # Item unseen during training: map to padding id 0.
                    i = 0
                item_feat = self._process_cold_start_feat(item_feat)
                ext_user_sequence.append((i, item_feat, 1))

        seq = np.zeros([self.maxlen + 1], dtype=np.int32)
        token_type = np.zeros([self.maxlen + 1], dtype=np.int32)
        seq_feat = np.empty([self.maxlen + 1], dtype=object)

        idx = self.maxlen  # fill right-to-left (left padding)

        # Walk backwards over all but the last token (mirrors the
        # training layout, which reserves the last slot for the target).
        for record_tuple in reversed(ext_user_sequence[:-1]):
            i, feat, type_ = record_tuple
            feat = self.fill_missing_feat(feat, i)
            seq[idx] = i
            token_type[idx] = type_
            seq_feat[idx] = feat
            idx -= 1
            if idx == -1:
                break

        # Replace untouched (None) slots with the default feature dict.
        seq_feat = np.where(seq_feat == None, self.feature_default_value, seq_feat)

        # NOTE(review): user_id is unbound if no record carries a user id
        # — assumed never to happen in the prediction data; confirm.
        return seq, token_type, seq_feat, user_id

    def __len__(self):
        """
        Returns:
            Number of test users. Fixed to reuse the offsets already
            loaded in __init__ instead of re-reading the pickle file on
            every call.
        """
        return len(self.seq_offsets)

    @staticmethod
    def collate_fn(batch):
        """
        Collate test samples: id/type arrays become torch tensors,
        feature dicts stay as a list, user ids stay as a tuple.

        Args:
            batch: list of __getitem__ outputs.

        Returns:
            Batched (seq, token_type, seq_feat, user_id).
        """
        seq, token_type, seq_feat, user_id = zip(*batch)
        seq = torch.from_numpy(np.array(seq))
        token_type = torch.from_numpy(np.array(token_type))
        seq_feat = list(seq_feat)

        return seq, token_type, seq_feat, user_id


def save_emb(emb, save_path):
    """
    Write an embedding matrix to a binary file.

    Layout: two little-endian unsigned ints (row count, dimension)
    followed by the raw array bytes.

    Args:
        emb: 2-D array of embeddings, shape (num_points, num_dimensions).
        save_path: destination file path.
    """
    rows, dims = emb.shape[0], emb.shape[1]
    print(f'saving {save_path}')
    with Path(save_path).open('wb') as out:
        # Header first: point count and vector dimensionality.
        out.write(struct.pack('II', rows, dims))
        # Then the matrix payload in the array's native byte order.
        emb.tofile(out)


def load_mm_emb(mm_path, feat_ids):
    """
    Load multimodal feature embeddings from disk.

    Feature '81' is stored as a single pickle; every other feature is a
    directory of JSONL part files, one record per creative.

    Args:
        mm_path: root directory of the multimodal embedding files.
        feat_ids: feature ids to load.

    Returns:
        feat_id -> {anonymous_cid: embedding ndarray} mapping.
    """
    # Embedding dimensionality per feature id (fixed by the data release).
    SHAPE_DICT = {"81": 32, "82": 1024, "83": 3584, "84": 4096, "85": 3584, "86": 3584}
    mm_emb_dict = {}

    for feat_id in tqdm(feat_ids, desc='Loading mm_emb'):
        dim = SHAPE_DICT[feat_id]
        if feat_id == '81':
            # '81' ships as one pre-built pickle dict.
            with open(Path(mm_path, f'emb_{feat_id}_{dim}.pkl'), 'rb') as f:
                emb_dict = pickle.load(f)
        else:
            emb_dict = {}
            # Best-effort load: a malformed part file is reported but
            # does not abort the whole run.
            try:
                part_dir = Path(mm_path, f'emb_{feat_id}_{dim}')
                for part_file in part_dir.glob('part*'):
                    with open(part_file, 'r', encoding='utf-8') as fh:
                        for line in fh:
                            record = json.loads(line.strip())
                            vec = record['emb']
                            if isinstance(vec, list):
                                vec = np.array(vec, dtype=np.float32)
                            emb_dict[record['anonymous_cid']] = vec
            except Exception as e:
                print(f"transfer error: {e}")
        mm_emb_dict[feat_id] = emb_dict
        print(f'Loaded #{feat_id} mm_emb')

    return mm_emb_dict