"""
完整的TikTok短视频推荐数据预处理代码
基于南京论文第三章模型预处理部分
适配Track2数据格式
优化版本：支持大文件流式处理，提高内存效率
"""
import os
import json
import random
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
from scipy.sparse import coo_matrix, save_npz, load_npz
from torch.utils.data import Dataset, DataLoader
from collections import Counter, defaultdict
import gc
from tqdm import tqdm
import pickle

# Paper hyperparameter configuration (from the Section 3.4 experimental setup)
CONFIG = {
    "video_max_duration": 300,  # maximum short-video duration (seconds)
    "frame_sample_interval": 2,  # key-frame sampling interval (seconds)
    "max_frames_per_video": 10,  # maximum key frames per video
    "min_valid_frames": 3,  # minimum valid key frames per video
    "audio_sample_rate": 16000,  # VGGish input sample rate
    "modal_dim": 250,  # unified multimodal feature dimension (paper's optimal value)
    "train_val_test_split": [0.8, 0.1, 0.1],  # 8:1:1 split ratio
    "batch_size": 2048,  # batch size
    "neg_sample_num": 5,  # negatives per positive sample (required by BPR loss)
    "min_video_interactions": 10,  # cold-video filtering threshold
    "title_vocab_size": 10000,  # title bag-of-words vocabulary size
}


def get_file_line_count(filepath):
    """Return the number of lines in a UTF-8 text file, or None if unreadable.

    The count is used only for progress display, so read failures are
    deliberately tolerated and reported as None.

    :param filepath: path to the text file
    :return: line count as int, or None on an I/O or decoding error
    """
    try:
        with open(filepath, 'r', encoding='utf-8') as f:
            return sum(1 for _ in f)
    except (OSError, UnicodeDecodeError):
        # Narrowed from a bare `except:` so unrelated errors (e.g.
        # KeyboardInterrupt) are no longer silently swallowed.
        return None


def _load_jsonl_features(filepath, desc, extract, show_progress=True, optional=False):
    """Stream one Track2 JSON-lines feature file into a dict keyed by item_id.

    Factors out the four identical load loops the original function repeated
    for audio / video / title / face files.

    :param filepath: path to the JSON-lines file (one record per line)
    :param desc: human-readable feature name used in progress/log messages
    :param extract: callable(record_dict) -> feature value to store
    :param show_progress: whether to display a tqdm progress bar
    :param optional: whether a missing file is expected (affects warning text)
    :return: dict mapping str(item_id) -> extracted feature (empty if missing)
    """
    features = {}
    if not os.path.exists(filepath):
        suffix = "（可选）" if optional else ""
        print(f"⚠ {desc}文件不存在: {filepath}{suffix}")
        return features

    line_count = get_file_line_count(filepath)
    print(f"正在加载{desc}文件（约{line_count}行）...")
    error_count = 0
    with open(filepath, "r", encoding="utf-8") as f:
        pbar = tqdm(f, total=line_count, desc=desc, disable=not show_progress)
        for line in pbar:
            try:
                data = json.loads(line.strip())
                features[str(data["item_id"])] = extract(data)
            except Exception as e:
                error_count += 1
                if error_count <= 5:  # only show the first 5 errors
                    print(f"  解析{desc}失败（第{error_count}个）: {e}")
        if error_count > 5:
            print(f"  共{error_count}条{desc}解析失败")
    print(f"✓ 加载{desc}：{len(features)}条")
    return features


def load_track2_data(data_dir, batch_size=10000, show_progress=True):
    """
    Load all Track2-format data files (optimized: streaming for large files).

    :param data_dir: directory containing the Track2 data files
    :param batch_size: unused; kept for backward compatibility with callers
    :param show_progress: whether to display progress bars
    :return: (audio_features, video_features, title_features, face_attrs,
              interaction_df) — the first four are {item_id: feature} dicts,
              the last is a DataFrame or None when the file is missing
    """
    print("=" * 60)
    print("步骤1: 加载Track2数据（优化大文件处理）")
    print("=" * 60)

    # 1-4. Stream each per-item feature file into an {item_id: feature} dict.
    audio_features = _load_jsonl_features(
        os.path.join(data_dir, "track2_audio_features.txt"), "音频特征",
        lambda d: np.array(d["audio_feature_128_dim"], dtype=np.float32),
        show_progress=show_progress)
    video_features = _load_jsonl_features(
        os.path.join(data_dir, "track2_video_features.txt"), "视频特征",
        lambda d: np.array(d["video_feature_dim_128"], dtype=np.float32),
        show_progress=show_progress)
    title_features = _load_jsonl_features(
        os.path.join(data_dir, "track2_title.txt"), "标题特征",
        lambda d: d["title_features"],
        show_progress=show_progress)
    face_attrs = _load_jsonl_features(
        os.path.join(data_dir, "track2_face_attrs.txt"), "人脸属性",
        lambda d: d["face_attrs"],
        show_progress=show_progress, optional=True)

    # 5. Interaction log (tab-separated, parsed in chunks).
    interaction_file = os.path.join(data_dir, "final_track2_train.txt")
    interaction_df = None
    if os.path.exists(interaction_file):
        interaction_df = parse_interaction_data(interaction_file, show_progress=show_progress)
    else:
        print(f"⚠ 交互数据文件不存在: {interaction_file}")

    print(f"\n数据加载完成！")
    print(f"  音频特征: {len(audio_features)}条")
    print(f"  视频特征: {len(video_features)}条")
    print(f"  标题特征: {len(title_features)}条")
    print(f"  人脸属性: {len(face_attrs)}条")
    print(f"  交互数据: {len(interaction_df) if interaction_df is not None else 0}条")

    return audio_features, video_features, title_features, face_attrs, interaction_df


def parse_interaction_data(interaction_file, show_progress=True, chunksize=100000):
    """
    Parse the interaction log (tab-separated, chunked reading for large files).

    Columns (Track2 format): user_id, user_city, item_id, author_id, item_city,
    channel, finish, like, music_id, did, creat_time, video_duration.

    Adds two derived columns:
      - interaction_type: 0 = none, 1 = like, 2 = finished, 3 = both (paper def)
      - timestamp: datetime parsed from the raw ``creat_time`` column

    :param interaction_file: path to the tab-separated interaction file
    :param show_progress: whether to show a tqdm progress bar
    :param chunksize: rows per read chunk
    :return: DataFrame with all chunks concatenated, or None on failure
    """
    column_names = [
        "user_id", "user_city", "item_id", "author_id",
        "item_city", "channel", "finish", "like",
        "music_id", "did", "creat_time", "video_duration"
    ]

    try:
        print(f"正在解析交互数据文件（分块读取，每块{chunksize}行）...")

        # Read the (possibly huge) file in chunks.
        chunks = []
        line_count = get_file_line_count(interaction_file)

        chunk_reader = pd.read_csv(
            interaction_file,
            sep="\t",
            header=None,
            names=column_names,
            chunksize=chunksize,
            low_memory=False
        )

        pbar = tqdm(chunk_reader, total=(line_count // chunksize + 1) if line_count else None,
                    desc="解析交互数据", disable=not show_progress)

        for chunk in pbar:
            # Normalize id columns to strings.
            chunk["user_id"] = chunk["user_id"].astype(str)
            chunk["item_id"] = chunk["item_id"].astype(str)

            # Interaction type per paper definition: 1 = like, 2 = finished.
            chunk["interaction_type"] = 0
            chunk.loc[chunk["like"] == 1, "interaction_type"] = 1
            chunk.loc[chunk["finish"] == 1, "interaction_type"] = 2

            # Composite interaction (like AND finish) gets its own type.
            interaction_count = chunk[["like", "finish"]].sum(axis=1)
            chunk.loc[interaction_count > 1, "interaction_type"] = 3

            # BUG FIX: the raw column is named "creat_time"; the old code read
            # a non-existent "timestamp" column, the KeyError was swallowed by
            # a bare except, and every row silently ended up with NaT.
            raw_time = chunk["creat_time"]
            try:
                if pd.api.types.is_numeric_dtype(raw_time):
                    # Epoch values above 1e12 are almost certainly milliseconds.
                    if raw_time.max() > 1e12:
                        chunk["timestamp"] = pd.to_datetime(raw_time, unit="ms", errors="coerce")
                    else:
                        chunk["timestamp"] = pd.to_datetime(raw_time, unit="s", errors="coerce")
                else:
                    chunk["timestamp"] = pd.to_datetime(raw_time, errors="coerce")
            except (ValueError, TypeError, OverflowError):
                # Unparseable timestamps degrade to NaT instead of aborting.
                chunk["timestamp"] = pd.NaT

            chunks.append(chunk)

        # Concatenate every chunk into one frame.
        df = pd.concat(chunks, ignore_index=True)

        # Release the chunk list before returning the big frame.
        del chunks
        gc.collect()

        print(f"✓ 解析交互数据：{len(df)}条")
        return df
    except Exception as e:
        print(f"✗ 解析交互文件失败：{e}")
        import traceback
        traceback.print_exc()
        return None


def clean_user_data(user_df, interaction_df):
    """
    Clean the user table (per paper chapter 3: drop users with no interactions
    and fully anonymized users).

    Also performs approximate duplicate-user detection: two users whose
    interacted-video sets have Jaccard similarity >= 0.8 are treated as the
    same user. The exact O(n^2) pairwise check is skipped entirely for large
    user sets (> 10000) and sampled down to at most 5000 users otherwise.

    :param user_df: DataFrame with at least user_id (optionally age/gender)
    :param interaction_df: DataFrame with user_id, item_id, interaction_type
    :return: cleaned copy of user_df
    """
    print("\n" + "=" * 60)
    print("步骤2: 用户数据清洗")
    print("=" * 60)

    original_count = len(user_df)

    # 1. Keep only users with at least one effective interaction
    #    (interaction_type != 0; the parser emits types 1-3).
    print("  筛选有有效交互的用户...")
    valid_user_ids = set(interaction_df[interaction_df["interaction_type"] != 0]["user_id"].unique())
    user_df = user_df[user_df["user_id"].isin(valid_user_ids)].copy()
    print(f"  有效用户数：{len(user_df)}")

    # 2. Drop fully anonymized users (age == -1 AND gender == -1, only when a
    #    gender column exists).
    if "gender" in user_df.columns:
        before_count = len(user_df)
        user_df = user_df[~((user_df["age"] == -1) & (user_df["gender"] == -1))].copy()
        print(f"  删除全脱敏用户：{before_count - len(user_df)}人")
    elif (user_df["age"] == -1).all():
        # Every age is -1: keep everyone rather than deleting the whole table.
        print("  警告：所有用户age都是-1，保留所有用户")

    # 3. Duplicate-user removal. The pairwise check is O(n^2), so for large
    #    user sets it is skipped (or sampled, below) to keep runtime bounded.
    if len(user_df) > 10000:
        print(f"  用户数量较大（{len(user_df)}），跳过精确去重以节省时间")
        print("  提示：如需精确去重，可以后续优化")
    else:
        print("  执行用户去重（交互视频重合度≥80%视为同一用户）...")
        # Build user -> {interacted item_id} map for surviving users only.
        user_interaction_map = defaultdict(set)
        valid_user_set = set(user_df["user_id"].unique())

        # Restrict to interactions of users that survived steps 1-2.
        valid_interactions = interaction_df[interaction_df["user_id"].isin(valid_user_set)]
        for _, row in tqdm(valid_interactions.iterrows(), total=len(valid_interactions), 
                          desc="构建用户交互映射", disable=len(valid_interactions) < 10000):
            user_interaction_map[row["user_id"]].add(row["item_id"])

        # Pairwise duplicate search (possibly on a random sample of users).
        duplicate_uids = set()
        uid_list = list(user_interaction_map.keys())

        # Cap the candidate list at 5000 users via random sampling.
        sample_size = min(5000, len(uid_list))
        if len(uid_list) > sample_size:
            print(f"  用户数量较多，使用采样方法（采样{sample_size}个用户进行去重检查）")
            uid_list = random.sample(uid_list, sample_size)

        print("  检查用户重复...")
        for i, uid1 in enumerate(tqdm(uid_list, desc="去重检查", disable=len(uid_list) < 1000)):
            if uid1 in duplicate_uids:
                continue
            set1 = user_interaction_map[uid1]
            if len(set1) == 0:
                continue

            for uid2 in uid_list[i + 1:]:
                if uid2 in duplicate_uids:
                    continue
                set2 = user_interaction_map[uid2]
                if len(set2) == 0:
                    continue

                # Jaccard similarity of the two users' video sets.
                overlap = len(set1 & set2)
                union = len(set1 | set2)
                if union > 0 and overlap / union >= 0.8:
                    duplicate_uids.add(uid2)

        if duplicate_uids:
            user_df = user_df[~user_df["user_id"].isin(duplicate_uids)].copy()
            print(f"  删除重复用户：{len(duplicate_uids)}人")

        del user_interaction_map
        gc.collect()

    print(f"✓ 清洗后用户数：{len(user_df)}人（剔除{original_count - len(user_df)}人）")
    return user_df


def clean_video_data(video_df, audio_features, video_features, title_features):
    """
    Clean the video table (per paper section 1-625: duration <= 300s, valid
    format, non-empty metadata).

    Keeps only videos that appear in at least one modality-feature dict;
    additionally filters by duration and category when those columns exist.

    :return: cleaned copy of video_df
    """
    print("\n" + "=" * 60)
    print("步骤3: 视频数据清洗")
    print("=" * 60)

    original_count = len(video_df)

    # 1. Keep videos present in at least one feature dictionary.
    featured_ids = set(audio_features)
    featured_ids.update(video_features)
    featured_ids.update(title_features)
    video_df = video_df[video_df["item_id"].isin(featured_ids)].copy()

    # 2. Enforce the maximum duration when a duration column exists.
    if "duration" in video_df.columns:
        duration_ok = video_df["duration"] <= CONFIG["video_max_duration"]
        video_df = video_df[duration_ok].copy()

    # 3. Drop rows with missing / unknown / empty category when present.
    if "category" in video_df.columns:
        cat = video_df["category"]
        keep = cat.notna() & (cat != "未知") & (cat != "")
        video_df = video_df[keep].copy()

    print(f"✓ 清洗后视频数：{len(video_df)}条（剔除{original_count - len(video_df)}条）")
    return video_df


def clean_interaction_data(interaction_df, user_df, video_df):
    """
    Clean the interaction log (per paper section 1-625: timestamp anomalies,
    deduplication, type normalization).

    Steps: restrict to known users/videos, keep timestamps inside
    [2019-01-01, 2020-01-01] when present, keep the first interaction per
    (user, video) pair, and drop rows with interaction_type == 0.

    :return: cleaned copy of interaction_df
    """
    print("\n" + "=" * 60)
    print("步骤4: 交互数据清洗")
    print("=" * 60)

    original_count = len(interaction_df)

    # 1. Keep interactions whose user AND video both survived cleaning.
    known_users = set(user_df["user_id"])
    known_items = set(video_df["item_id"])
    valid_mask = (interaction_df["user_id"].isin(known_users)
                  & interaction_df["item_id"].isin(known_items))
    interaction_df = interaction_df[valid_mask].copy()

    # 2. Timestamp window filter (paper: 2019-01-01 through 2020-01-01).
    if "timestamp" in interaction_df.columns and interaction_df["timestamp"].notna().any():
        lo = pd.to_datetime("2019-01-01")
        hi = pd.to_datetime("2020-01-01")
        in_window = (interaction_df["timestamp"] >= lo) & (interaction_df["timestamp"] <= hi)
        interaction_df = interaction_df[in_window].copy()

    # 3. Deduplicate: keep only the first row per (user, video) pair.
    interaction_df = interaction_df.drop_duplicates(
        subset=["user_id", "item_id"], keep="first"
    ).copy()

    # 4. Discard non-interactions (interaction_type == 0).
    interaction_df = interaction_df[interaction_df["interaction_type"] != 0].copy()

    print(f"✓ 清洗后交互数：{len(interaction_df)}条（剔除{original_count - len(interaction_df)}条）")
    return interaction_df


def process_user_features(user_df, interaction_df):
    """
    Build user-side features: age bucketing and gender normalization.

    Anomalous ages (-1, negative, > 150) are replaced with the median of the
    valid ages (fallback 30) so that ``pd.cut`` never produces NaN buckets.

    :param user_df: DataFrame with at least user_id and age columns
    :param interaction_df: unused here; kept for interface compatibility
    :return: copy of user_df with age_bin and gender columns added/normalized
    """
    print("\n" + "=" * 60)
    print("步骤5: 用户特征处理")
    print("=" * 60)

    user_df = user_df.copy()

    # 1. Age bucketing (paper 3.2.1: five intervals).
    age_bins = [0, 18, 30, 45, 60, 150]
    age_labels = [0, 1, 2, 3, 4]

    print(f"  处理前年龄范围：{user_df['age'].min()} - {user_df['age'].max()}")
    # Replace anomalous ages (-1 / negative / > 150) with the valid median.
    bad_age = (user_df["age"] == -1) | (user_df["age"] < 0) | (user_df["age"] > 150)
    if bad_age.any():
        valid_ages = user_df.loc[(user_df["age"] > 0) & (user_df["age"] <= 150), "age"]
        median_age = valid_ages.median()
        fill_age = 30 if pd.isna(median_age) else median_age
        print(f"  发现{bad_age.sum()}个异常年龄值，使用{fill_age}填充")
        user_df.loc[bad_age, "age"] = fill_age

    # Clamp so every value falls inside [0, 150) before bucketing.
    user_df["age"] = user_df["age"].clip(lower=0, upper=149)

    # Bucket with left-closed intervals; include_lowest keeps age 0 in bin 0.
    user_df["age_bin"] = pd.cut(
        user_df["age"],
        bins=age_bins,
        labels=age_labels,
        right=False,
        include_lowest=True,
    )

    # Defensive: fill any remaining NaN bucket with the most frequent one.
    nan_bins = user_df["age_bin"].isna()
    if nan_bins.any():
        print(f"  警告：发现{nan_bins.sum()}个NaN的age_bin，使用最频繁值填充")
        modes = user_df["age_bin"].mode()
        fill_bin = modes.iloc[0] if len(modes) > 0 else age_labels[2]
        user_df["age_bin"] = user_df["age_bin"].fillna(fill_bin)

    # 2. Gender: create a default column when absent, then de-anonymize -1.
    if "gender" not in user_df.columns:
        user_df["gender"] = 0

    anonymized = user_df["gender"] == -1
    if anonymized.any():
        known = user_df.loc[~anonymized, "gender"].mode()
        default_gender = known.iloc[0] if len(known) > 0 else 0
        user_df.loc[anonymized, "gender"] = default_gender

    print(f"✓ 用户特征处理完成：{len(user_df)}人")
    print(f"  age_bin分布：{user_df['age_bin'].value_counts().to_dict()}")
    return user_df


def encode_user_features(user_df):
    """
    Encode user features (paper 3.2.1: one-hot + ID mapping + concatenation).

    Output layout per row: [user_idx, gender, age one-hot..., region one-hot...].
    The "region" column is synthetic, derived from the user-id order
    (factorize mod 5), not a real location.

    :param user_df: DataFrame with user_id, age_bin and gender columns
    :return: (torch.FloatTensor of shape (num_users, D),
              dict mapping user_id -> contiguous row index)
    """
    print("\n" + "=" * 60)
    print("步骤6: 用户特征编码")
    print("=" * 60)

    # 1. Map user_id to a contiguous index first (used below and by callers).
    user_id_to_idx = {uid: idx for idx, uid in enumerate(user_df["user_id"].unique())}
    user_df = user_df.copy()
    user_df["user_idx"] = user_df["user_id"].map(user_id_to_idx).astype(int)

    # 2. One-hot encoding (age bucket, synthetic region).
    # Fill NaN age_bin values with the most frequent bucket (or a default).
    if user_df["age_bin"].isna().any():
        print(f"  发现{user_df['age_bin'].isna().sum()}个NaN的age_bin值，使用最频繁值填充...")
        most_frequent_age_bin = user_df["age_bin"].mode()
        if len(most_frequent_age_bin) > 0:
            fill_value = most_frequent_age_bin.iloc[0]
        else:
            fill_value = 2  # default: middle age group
        user_df["age_bin"] = user_df["age_bin"].fillna(fill_value)
        print(f"  使用值 {fill_value} 填充NaN")

    # Convert age_bin to numeric codes (handle pandas Categorical dtype).
    if isinstance(user_df["age_bin"].dtype, pd.CategoricalDtype):
        user_df["age_bin"] = user_df["age_bin"].cat.codes
        # cat.codes yields -1 for NaN entries; remap those to a fresh code.
        user_df["age_bin"] = user_df["age_bin"].replace(-1, user_df["age_bin"].max() + 1 if user_df["age_bin"].max() >= 0 else 0)
    else:
        # Not categorical: coerce to numeric, then to int.
        user_df["age_bin"] = pd.to_numeric(user_df["age_bin"], errors="coerce")
        # Handle any NaN introduced by the coercion.
        if user_df["age_bin"].isna().any():
            user_df["age_bin"] = user_df["age_bin"].fillna(user_df["age_bin"].median())
        user_df["age_bin"] = user_df["age_bin"].astype(int)

    age_onehot = pd.get_dummies(user_df["age_bin"], prefix="age", dtype=float)

    # Synthetic region derived from user-id order (simplified stand-in).
    user_df["region"] = (pd.factorize(user_df["user_id"])[0] % 5).astype(int)
    region_onehot = pd.get_dummies(user_df["region"], prefix="region", dtype=float)

    # 3. Gender as a 0/1 float.
    user_df["gender_encoded"] = user_df["gender"].astype(float)

    # 4. Concatenate features (user_idx + gender + age_onehot + region_onehot);
    # cast every column to float so the resulting matrix is homogeneous.
    user_idx_col = user_df[["user_idx"]].astype(float)
    gender_col = user_df[["gender_encoded"]].astype(float)

    user_features_df = pd.concat([
        user_idx_col,
        gender_col,
        age_onehot,
        region_onehot
    ], axis=1)

    # To numpy (float32), then to a torch tensor.
    user_features = user_features_df.values.astype(np.float32)

    user_features = torch.FloatTensor(user_features)
    print(f"✓ 用户特征矩阵形状：{user_features.shape}")
    print(f"  特征维度：user_idx(1) + gender(1) + age_onehot({age_onehot.shape[1]}) + region_onehot({region_onehot.shape[1]}) = {user_features.shape[1]}")
    return user_features, user_id_to_idx


def convert_bag_of_words_to_vector(bag_of_words, vocab_size, normalize=True):
    """
    Convert a bag-of-words mapping into a fixed-size dense vector.

    :param bag_of_words: mapping of word_id (int or numeric string) -> count;
        word ids are assumed to start at 1
    :param vocab_size: dimensionality of the output vector
    :param normalize: if True, L1-normalize the counts so they sum to 1
    :return: np.ndarray of shape (vocab_size,), dtype float32
    """
    vector = np.zeros(vocab_size, dtype=np.float32)
    for word_id, count in bag_of_words.items():
        try:
            idx = int(word_id) - 1  # word ids are 1-based
            if 0 <= idx < vocab_size:
                vector[idx] = float(count)
        except (ValueError, TypeError):
            # Narrowed from a bare except: skip only malformed ids/counts.
            continue

    # Compute the total once (the original summed twice).
    total = np.sum(vector)
    if normalize and total > 0:
        vector = vector / total

    return vector


def extract_visual_features_from_vectors(video_df, video_features_dict):
    """
    Project the provided 128-dim video features to 250 dims (paper 3.2.1).

    Missing features become zero vectors; features with the wrong size are
    truncated or zero-padded to 128 before projection. The 128 -> modal_dim
    projection uses a randomly initialized linear layer; per the in-code note,
    its weights are expected to be learned later during model training.

    :param video_df: DataFrame with an item_id column
    :param video_features_dict: {str(item_id): 128-dim feature} mapping
    :return: (torch.FloatTensor of shape (N, CONFIG["modal_dim"]),
              dict item_id -> contiguous row index)
    """
    print("\n" + "=" * 60)
    print("步骤7: 视觉特征提取")
    print("=" * 60)

    video_ids = list(video_df["item_id"].unique())
    video_id_to_idx = {vid: idx for idx, vid in enumerate(video_ids)}

    print(f"  处理{len(video_ids)}个视频的视觉特征...")

    # Extract feature vectors batch by batch to bound memory usage.
    batch_size = 10000
    visual_features_list = []
    missing_count = 0
    dimension_error_count = 0

    for i in tqdm(range(0, len(video_ids), batch_size), desc="提取视觉特征"):
        batch_ids = video_ids[i:i + batch_size]
        batch_features = []
        for item_id in batch_ids:
            item_id_str = str(item_id)
            feat = video_features_dict.get(item_id_str, None)

            if feat is None:
                # Missing feature: substitute a zero vector.
                feat = np.zeros(128, dtype=np.float32)
                missing_count += 1
            else:
                # Normalize to a float32 numpy array.
                if not isinstance(feat, np.ndarray):
                    feat = np.array(feat, dtype=np.float32)
                else:
                    feat = feat.astype(np.float32)

                # Repair wrong-sized vectors.
                if feat.shape[0] != 128:
                    if feat.shape[0] > 128:
                        # Too long: keep the first 128 dims.
                        feat = feat[:128]
                        dimension_error_count += 1
                    elif feat.shape[0] < 128:
                        # Too short: zero-pad up to 128 dims.
                        padded = np.zeros(128, dtype=np.float32)
                        padded[:feat.shape[0]] = feat
                        feat = padded
                        dimension_error_count += 1

            # Force a 1-D vector of length 128.
            if feat.ndim > 1:
                feat = feat.flatten()[:128]
            elif feat.ndim == 0:
                feat = np.zeros(128, dtype=np.float32)

            # Final safety net: anything still misshapen becomes zeros.
            if len(feat) != 128:
                feat = np.zeros(128, dtype=np.float32)

            batch_features.append(feat)

        # Stack the batch and double-check its shape.
        try:
            batch_array = np.array(batch_features, dtype=np.float32)  # (batch, 128)
            # Shape validation.
            if batch_array.shape[1] != 128:
                print(f"  警告：批次{i}的特征维度不正确，形状为{batch_array.shape}")
                # Repair: force exactly 128 columns.
                if batch_array.shape[1] > 128:
                    batch_array = batch_array[:, :128]
                else:
                    padded = np.zeros((batch_array.shape[0], 128), dtype=np.float32)
                    padded[:, :batch_array.shape[1]] = batch_array
                    batch_array = padded
            visual_features_list.append(batch_array)
        except Exception as e:
            print(f"  错误：批次{i}处理失败：{e}")
            # Fall back to a zero block so row alignment is preserved.
            batch_array = np.zeros((len(batch_ids), 128), dtype=np.float32)
            visual_features_list.append(batch_array)

    if missing_count > 0:
        print(f"  警告：{missing_count}个视频缺少视觉特征，使用零向量填充")
    if dimension_error_count > 0:
        print(f"  警告：{dimension_error_count}个视频的特征维度不正确，已修复")

    # Concatenate all batches.
    visual_features = np.vstack(visual_features_list)  # (N, 128)

    # Project to modal_dim with a linear layer (paper 3.2.1).
    # NOTE: the layer is randomly initialized here; actual training is
    # expected to update the weights later.
    linear_layer = nn.Linear(128, CONFIG["modal_dim"])
    # Xavier initialization.
    nn.init.xavier_uniform_(linear_layer.weight)
    nn.init.zeros_(linear_layer.bias)

    with torch.no_grad():
        visual_features_tensor = torch.FloatTensor(visual_features)
        visual_features_250 = linear_layer(visual_features_tensor).numpy()

    # Convert back to a torch tensor.
    visual_features = torch.FloatTensor(visual_features_250)
    print(f"✓ 视觉特征矩阵形状：{visual_features.shape}（{CONFIG['modal_dim']}维）")

    # Free intermediate memory.
    del visual_features_list
    gc.collect()

    return visual_features, video_id_to_idx


def extract_audio_features_from_vectors(video_df, audio_features_dict):
    """
    Project the provided 128-dim audio features to 250 dims (paper 3.2.1).

    Mirrors extract_visual_features_from_vectors: missing features become
    zero vectors; wrong-sized features are truncated or zero-padded to 128
    before projection through a randomly initialized linear layer.

    :param video_df: DataFrame with an item_id column
    :param audio_features_dict: {str(item_id): 128-dim feature} mapping
    :return: torch.FloatTensor of shape (N, CONFIG["modal_dim"])
    """
    print("\n" + "=" * 60)
    print("步骤8: 音频特征提取")
    print("=" * 60)

    video_ids = list(video_df["item_id"].unique())

    print(f"  处理{len(video_ids)}个视频的音频特征...")

    # Extract feature vectors batch by batch to bound memory usage.
    batch_size = 10000
    audio_features_list = []
    missing_count = 0
    dimension_error_count = 0

    for i in tqdm(range(0, len(video_ids), batch_size), desc="提取音频特征"):
        batch_ids = video_ids[i:i + batch_size]
        batch_features = []
        for item_id in batch_ids:
            item_id_str = str(item_id)
            feat = audio_features_dict.get(item_id_str, None)

            if feat is None:
                # Missing feature: substitute a zero vector.
                feat = np.zeros(128, dtype=np.float32)
                missing_count += 1
            else:
                # Normalize to a float32 numpy array.
                if not isinstance(feat, np.ndarray):
                    feat = np.array(feat, dtype=np.float32)
                else:
                    feat = feat.astype(np.float32)

                # Repair wrong-sized vectors (truncate or zero-pad to 128).
                if feat.shape[0] != 128:
                    if feat.shape[0] > 128:
                        feat = feat[:128]
                        dimension_error_count += 1
                    elif feat.shape[0] < 128:
                        padded = np.zeros(128, dtype=np.float32)
                        padded[:feat.shape[0]] = feat
                        feat = padded
                        dimension_error_count += 1

            # Force a 1-D vector of length 128.
            if feat.ndim > 1:
                feat = feat.flatten()[:128]
            elif feat.ndim == 0:
                feat = np.zeros(128, dtype=np.float32)

            if len(feat) != 128:
                feat = np.zeros(128, dtype=np.float32)

            batch_features.append(feat)

        # Stack the batch and double-check its shape.
        try:
            batch_array = np.array(batch_features, dtype=np.float32)  # (batch, 128)
            if batch_array.shape[1] != 128:
                if batch_array.shape[1] > 128:
                    batch_array = batch_array[:, :128]
                else:
                    padded = np.zeros((batch_array.shape[0], 128), dtype=np.float32)
                    padded[:, :batch_array.shape[1]] = batch_array
                    batch_array = padded
            audio_features_list.append(batch_array)
        except Exception as e:
            print(f"  错误：批次{i}处理失败：{e}")
            # Fall back to a zero block so row alignment is preserved.
            batch_array = np.zeros((len(batch_ids), 128), dtype=np.float32)
            audio_features_list.append(batch_array)

    if missing_count > 0:
        print(f"  警告：{missing_count}个视频缺少音频特征，使用零向量填充")
    if dimension_error_count > 0:
        print(f"  警告：{dimension_error_count}个视频的音频特征维度不正确，已修复")

    # Concatenate all batches.
    audio_features = np.vstack(audio_features_list)  # (N, 128)

    # Project to modal_dim with a randomly initialized linear layer
    # (paper 3.2.1); weights are expected to be learned during training.
    linear_layer = nn.Linear(128, CONFIG["modal_dim"])
    # Xavier initialization.
    nn.init.xavier_uniform_(linear_layer.weight)
    nn.init.zeros_(linear_layer.bias)

    with torch.no_grad():
        audio_features_tensor = torch.FloatTensor(audio_features)
        audio_features_250 = linear_layer(audio_features_tensor).numpy()

    # Convert back to a torch tensor.
    audio_features = torch.FloatTensor(audio_features_250)
    print(f"✓ 音频特征矩阵形状：{audio_features.shape}（{CONFIG['modal_dim']}维）")

    # Free intermediate memory.
    del audio_features_list
    gc.collect()

    return audio_features


def extract_text_features_from_vectors(video_df, title_features_dict, vocab_size):
    """
    Extract 250-dim text features from bag-of-words titles (paper 3.2.1).

    Each batch is densified to (batch, vocab_size), immediately projected to
    CONFIG["modal_dim"] dims through a randomly initialized linear layer
    (weights are meant to be learned during training), then discarded, so the
    full (N, vocab_size) matrix is never materialized.

    :param video_df: DataFrame with an item_id column
    :param title_features_dict: {item_id: {word_id: count}} bag-of-words
    :param vocab_size: bag-of-words vocabulary size
    :return: torch.FloatTensor of shape (num_videos, CONFIG["modal_dim"])
    """
    print("\n" + "=" * 60)
    print("步骤9: 文本特征提取")
    print("=" * 60)

    video_ids = list(video_df["item_id"].unique())

    print(f"  处理{len(video_ids)}个视频的文本特征（词汇表大小：{vocab_size}）...")
    print(f"  注意：词汇表较大，使用小批次处理以避免内存溢出")

    # Projection layer to modal_dim (weights updated during actual training).
    linear_layer = nn.Linear(vocab_size, CONFIG["modal_dim"])
    # Xavier initialization.
    nn.init.xavier_uniform_(linear_layer.weight)
    nn.init.zeros_(linear_layer.bias)

    # Size batches so one dense float32 batch stays under ~1GB.
    max_memory_per_batch_gb = 1.0
    max_elements_per_batch = int(max_memory_per_batch_gb * 1024 * 1024 * 1024 / 4)  # float32 = 4 bytes
    batch_size = max(1, min(1000, max_elements_per_batch // vocab_size))

    print(f"  批次大小：{batch_size}（根据内存自动调整）")

    text_features_250_list = []

    # BUG FIX: the old implementation iterated `for i in range(...)` and tried
    # to retry a failed batch via `i -= batch_size; continue`, which has no
    # effect on a range loop — the failed batch was silently DROPPED, so the
    # output rows no longer lined up with video_ids. A while-loop makes both
    # the retry (on MemoryError) and the row alignment correct.
    pbar = tqdm(total=len(video_ids), desc="提取文本特征")
    i = 0
    while i < len(video_ids):
        batch_ids = video_ids[i:i + batch_size]
        try:
            batch_features = [
                convert_bag_of_words_to_vector(
                    title_features_dict.get(str(item_id), {}), vocab_size, normalize=True
                )
                for item_id in batch_ids
            ]
            batch_array = np.array(batch_features, dtype=np.float32)  # (batch, vocab_size)

            # Project to modal_dim immediately to avoid holding the big matrix.
            with torch.no_grad():
                batch_250 = linear_layer(torch.FloatTensor(batch_array)).numpy()

            text_features_250_list.append(batch_250)
            del batch_features, batch_array, batch_250
            gc.collect()
        except MemoryError:
            if batch_size == 1:
                # Cannot shrink further: keep alignment with a zero row.
                print(f"  错误：批次{i}处理失败：内存不足")
                text_features_250_list.append(
                    np.zeros((len(batch_ids), CONFIG["modal_dim"]), dtype=np.float32)
                )
            else:
                # Halve the batch size and retry the SAME offset.
                batch_size = max(1, batch_size // 2)
                print(f"  内存不足，减小批次大小...")
                print(f"  新批次大小：{batch_size}")
                continue
        except Exception as e:
            # Any other failure: keep row alignment with a zero block.
            print(f"  错误：批次{i}处理失败：{e}")
            text_features_250_list.append(
                np.zeros((len(batch_ids), CONFIG["modal_dim"]), dtype=np.float32)
            )

        pbar.update(len(batch_ids))
        i += len(batch_ids)
    pbar.close()

    # Concatenate the already-projected batches.
    text_features = torch.FloatTensor(np.vstack(text_features_250_list))  # (N, modal_dim)
    print(f"✓ 文本特征矩阵形状：{text_features.shape}（{CONFIG['modal_dim']}维）")

    # Free intermediate memory.
    del text_features_250_list
    gc.collect()

    return text_features


def fuse_multimodal_features(visual_feat, text_feat, audio_feat, video_id_to_idx, video_df):
    """
    Fuse multimodal features (paper 3.2.1: video_idx + visual + text + audio).

    Note: mutates ``video_df`` in place by adding a ``video_idx`` column.

    :return: torch.FloatTensor of shape (M, 1 + 3 * modal_dim)
    """
    print("\n" + "=" * 60)
    print("步骤10: 多模态特征融合")
    print("=" * 60)

    # Map raw item ids to contiguous indices; kept as the first feature column.
    video_df["video_idx"] = video_df["item_id"].map(video_id_to_idx)
    idx_column = torch.LongTensor(video_df["video_idx"].values).unsqueeze(1).float()  # (M,1)

    # Concatenate: index (1) + visual (250) + text (250) + audio (250).
    parts = [idx_column, visual_feat, text_feat, audio_feat]
    multimodal_feat = torch.cat(parts, dim=1)

    print(f"✓ 短视频多模态特征矩阵形状：{multimodal_feat.shape}（751维）")
    return multimodal_feat


def build_interaction_graph(interaction_df, user_id_to_idx, video_id_to_idx):
    """
    Build the user–video bipartite graph as a COO sparse adjacency matrix
    (paper 3.2.3).

    Edge weights follow the paper's scheme (type 4 -> 2.0, 3 -> 1.5,
    2 -> 1.2, 1 -> 1.0); parallel edges between the same (user, video) pair
    are summed.

    :return: scipy.sparse.coo_matrix of shape (num_users, num_videos)
    """
    print("\n" + "=" * 60)
    print("步骤11: 构建交互图")
    print("=" * 60)

    # 1. Translate raw ids into contiguous matrix indices.
    interaction_df = interaction_df.copy()
    interaction_df["user_idx"] = interaction_df["user_id"].map(user_id_to_idx)
    interaction_df["video_idx"] = interaction_df["item_id"].map(video_id_to_idx)

    # 2. Drop rows whose user or video has no index (map produced NaN).
    before_count = len(interaction_df)
    mapped = interaction_df["user_idx"].notna() & interaction_df["video_idx"].notna()
    interaction_df = interaction_df[mapped].copy()
    dropped = before_count - len(interaction_df)
    if dropped > 0:
        print(f"  过滤无效映射：{dropped}条")

    # 3. Edge weights (paper 3.2.3: composite 2.0, share 1.5, comment 1.2, like 1.0).
    weight_map = {4: 2.0, 3: 1.5, 2: 1.2, 1: 1.0}
    interaction_df["weight"] = interaction_df["interaction_type"].map(weight_map)

    # 4. Collapse parallel edges by summing their weights.
    print("  合并重复边（同一用户-视频的多条交互）...")
    edges = interaction_df.groupby(["user_idx", "video_idx"])["weight"].sum().reset_index()

    # 5. Assemble the COO sparse adjacency matrix.
    num_users = len(user_id_to_idx)
    num_videos = len(video_id_to_idx)
    rows = edges["user_idx"].astype(int).values
    cols = edges["video_idx"].astype(int).values
    weights = edges["weight"].values

    adj_matrix = coo_matrix((weights, (rows, cols)), shape=(num_users, num_videos))
    sparsity = len(weights) / (num_users * num_videos) if (num_users * num_videos) > 0 else 0
    print(f"✓ 交互图邻接矩阵形状：{adj_matrix.shape}（边数：{len(weights)}，稀疏度：{sparsity:.6f}）")
    return adj_matrix


def split_dataset(interaction_df, user_id_to_idx):
    """
    Split interactions into train/val/test along each user's timeline (paper sec. 3.4.3).

    Each user's interactions are ordered chronologically (or deterministically
    shuffled when timestamps are missing) and sliced by the ratios in
    ``CONFIG["train_val_test_split"]``.

    :param interaction_df: interaction records with at least ``user_id``.
    :param user_id_to_idx: mapping used to attach a ``user_idx`` column.
    :return: tuple (train_df, val_df, test_df).
    """
    print("\n" + "=" * 60)
    print("步骤12: 数据集划分")
    print("=" * 60)
    
    # Attach a user_idx column up front so downstream steps don't recompute it.
    if "user_idx" not in interaction_df.columns:
        interaction_df = interaction_df.copy()
        interaction_df["user_idx"] = interaction_df["user_id"].map(user_id_to_idx)
        interaction_df = interaction_df[interaction_df["user_idx"].notna()].copy()
    
    splits = {"train": [], "val": [], "test": []}
    
    # Group by user and slice each user's history independently.
    grouped = interaction_df.groupby("user_id")
    n_users = len(grouped)
    
    print(f"  处理{n_users}个用户的数据划分...")
    for _, group in tqdm(grouped, total=n_users, desc="划分数据集", disable=n_users < 1000):
        ordered = group.copy()
        
        has_timestamps = "timestamp" in ordered.columns and ordered["timestamp"].notna().any()
        if has_timestamps:
            # Chronological order: history first, future last.
            ordered = ordered.sort_values("timestamp").reset_index(drop=True)
        else:
            # No usable timestamps -> fall back to a deterministic shuffle.
            ordered = ordered.sample(frac=1, random_state=42).reset_index(drop=True)
        
        n = len(ordered)
        if n == 0:
            continue
        
        # Slice sizes from the configured 8:1:1 ratios.
        n_train = int(n * CONFIG["train_val_test_split"][0])
        n_val = int(n * CONFIG["train_val_test_split"][1])
        
        if n_train > 0:
            splits["train"].append(ordered.iloc[:n_train])
        if n_val > 0:
            splits["val"].append(ordered.iloc[n_train:n_train + n_val])
        if n > n_train + n_val:
            splits["test"].append(ordered.iloc[n_train + n_val:])
    
    def _merge(parts):
        # Concatenate per-user slices; empty DataFrame when nothing was collected.
        return pd.concat(parts, ignore_index=True) if parts else pd.DataFrame()
    
    train_df = _merge(splits["train"])
    val_df = _merge(splits["val"])
    test_df = _merge(splits["test"])
    
    # Defensive: guarantee every non-empty split carries a user_idx column.
    for part in (train_df, val_df, test_df):
        if len(part) > 0 and "user_idx" not in part.columns:
            part["user_idx"] = part["user_id"].map(user_id_to_idx)
    
    print(f"✓ 数据集划分：训练{len(train_df)}条，验证{len(val_df)}条，测试{len(test_df)}条")
    return train_df, val_df, test_df


def generate_neg_samples(pos_df, all_video_ids, video_id_to_idx, interaction_df=None, user_video_interactions=None):
    """
    Generate negative samples (paper sec. 3.3.4: 5 negatives per positive sample).

    Optimisations: the per-user negative-candidate list is cached (it depends
    only on the user's positive set, so it is computed once per user instead of
    once per positive row), and a precomputed user -> interacted-videos map can
    be supplied to avoid rebuilding it from ``pos_df``.

    :param pos_df: positive interactions; must contain ``user_id``, ``item_id``
        and ``user_idx`` columns.
    :param all_video_ids: iterable of all candidate video ids.
    :param video_id_to_idx: mapping str(item_id) -> video index.
    :param interaction_df: optional full interaction log used to find hot videos.
    :param user_video_interactions: optional {user_id: set(item_id)} map.
    :return: DataFrame with columns ["user_idx", "pos_video_idx", "neg_video_idx"].
    """
    print("\n" + "=" * 60)
    print("步骤13: 生成负样本")
    print("=" * 60)
    
    # 1. Filter cold videos (< CONFIG["min_video_interactions"] interactions).
    print("  过滤冷门视频...")
    if interaction_df is not None:
        video_interaction_count = interaction_df["item_id"].value_counts()
        hot_video_ids = set(video_interaction_count[video_interaction_count >= CONFIG["min_video_interactions"]].index)
    else:
        # No interaction log: derive counts from the per-user map if available,
        # otherwise keep every video.
        if user_video_interactions:
            # Counter is already imported at module level; the previous
            # redundant local import has been removed.
            video_counts = Counter()
            for videos in user_video_interactions.values():
                video_counts.update(videos)
            hot_video_ids = {vid for vid, count in video_counts.items()
                             if count >= CONFIG["min_video_interactions"]}
        else:
            hot_video_ids = set(all_video_ids)
    
    all_video_ids_set = set(all_video_ids) & hot_video_ids
    all_video_ids_list = list(all_video_ids_set)
    print(f"  热门视频数：{len(all_video_ids_list)}（原始：{len(all_video_ids)}）")
    
    # 2. Build (or reuse) the user -> already-interacted-videos map.
    print("  构建用户交互映射...")
    if user_video_interactions is not None:
        user_pos_videos = user_video_interactions
    else:
        user_pos_videos = defaultdict(set)
        for _, row in tqdm(pos_df.iterrows(), total=len(pos_df), desc="构建映射", disable=len(pos_df) < 10000):
            user_pos_videos[row["user_id"]].add(row["item_id"])
    
    # 3. Generate negatives (processed in batches for progress reporting).
    print(f"  生成负样本（每个正样本{CONFIG['neg_sample_num']}个负样本）...")
    triplets = []
    
    # Without user_idx we cannot emit triplets at all.
    if "user_idx" not in pos_df.columns:
        print("  警告：pos_df缺少user_idx列，无法生成三元组")
        return pd.DataFrame(columns=["user_idx", "pos_video_idx", "neg_video_idx"])
    
    # Perf fix: the original rebuilt the O(|videos|) candidate list for EVERY
    # positive row, i.e. O(rows x videos) overall. The list is identical for
    # all rows of one user, so cache it per user. Sampling order is unchanged,
    # hence the random stream (and output) is identical.
    neg_candidates_cache = {}
    
    batch_size = 10000
    for i in tqdm(range(0, len(pos_df), batch_size), desc="生成三元组"):
        batch_df = pos_df.iloc[i:i + batch_size]
        
        for _, row in batch_df.iterrows():
            uid = row["user_id"]
            pos_vid = row["item_id"]
            user_idx = row["user_idx"]
            
            # Videos this user has NOT interacted with (cached per user).
            neg_candidates = neg_candidates_cache.get(uid)
            if neg_candidates is None:
                user_pos_set = user_pos_videos.get(uid, set())
                neg_candidates = [vid for vid in all_video_ids_list if vid not in user_pos_set]
                neg_candidates_cache[uid] = neg_candidates
            
            if len(neg_candidates) == 0:
                continue  # no negative candidates for this user; skip
            
            # Sample negatives; fall back to sampling with replacement when
            # there are fewer candidates than requested negatives.
            if len(neg_candidates) < CONFIG["neg_sample_num"]:
                neg_vids = random.choices(neg_candidates, k=CONFIG["neg_sample_num"])
            else:
                neg_vids = random.sample(neg_candidates, k=CONFIG["neg_sample_num"])
            
            # Map ids to indices; skip positives absent from the index map.
            # NOTE(review): ids are str()-normalised before lookup — assumes
            # video_id_to_idx keys are strings; confirm against its builder.
            pos_video_idx = video_id_to_idx.get(str(pos_vid), -1)
            if pos_video_idx == -1:
                continue
            
            for neg_vid in neg_vids:
                neg_video_idx = video_id_to_idx.get(str(neg_vid), -1)
                if neg_video_idx != -1:
                    triplets.append([int(user_idx), int(pos_video_idx), int(neg_video_idx)])
    
    # Convert the accumulated triplets to a DataFrame.
    if len(triplets) > 0:
        triplet_df = pd.DataFrame(triplets, columns=["user_idx", "pos_video_idx", "neg_video_idx"])
    else:
        triplet_df = pd.DataFrame(columns=["user_idx", "pos_video_idx", "neg_video_idx"])
    
    print(f"✓ 生成BPR三元组：{len(triplet_df)}条（{len(pos_df)}正样本×{CONFIG['neg_sample_num']}负样本）")
    
    # Free intermediate structures.
    del user_pos_videos, neg_candidates_cache
    gc.collect()
    
    return triplet_df


class BPRDataset(Dataset):
    """Dataset of (user, positive video, negative video) BPR triplets (paper sec. 3.3.4)."""
    
    def __init__(self, triplet_df):
        # Keep the underlying numpy view; each row is
        # [user_idx, pos_video_idx, neg_video_idx].
        self.triplets = triplet_df.values
    
    def __len__(self):
        return len(self.triplets)
    
    def __getitem__(self, idx):
        row = self.triplets[idx]
        user, pos, neg = (int(value) for value in row[:3])
        return (
            torch.LongTensor([user]),
            torch.LongTensor([pos]),
            torch.LongTensor([neg]),
        )


def build_dataloader(triplet_df, shuffle=True):
    """Wrap a triplet DataFrame in a DataLoader (paper sec. 3.4.3: batch_size=2048)."""
    return DataLoader(
        BPRDataset(triplet_df),
        batch_size=CONFIG["batch_size"],
        shuffle=shuffle,
        drop_last=False,
        num_workers=0,  # 0 recommended on Windows
    )


def full_preprocess(data_dir="./", save_dir="preprocessed_data", show_progress=True):
    """
    Full preprocessing pipeline (paper chapter 3).

    Optimised version: handles large input files and keeps peak memory low by
    freeing intermediate structures as soon as they are no longer needed.

    :param data_dir: directory containing the raw Track2 data files.
    :param save_dir: directory where preprocessed artefacts are written.
    :param show_progress: whether to display progress bars while loading.
    :return: dict with feature matrices, adjacency matrix, dataloaders and id
        mappings, or None when the interaction data could not be loaded.
    """
    print("\n" + "=" * 60)
    print("开始完整数据预处理流程（优化版）")
    print("=" * 60)
    
    # 1. Load raw data
    audio_features, video_features, title_features, face_attrs, interaction_df = load_track2_data(
        data_dir, show_progress=show_progress
    )
    
    if interaction_df is None or len(interaction_df) == 0:
        print("✗ 错误：无法加载交互数据，预处理终止")
        return None
    
    # Optionally release face attributes here if they are not needed downstream.
    # del face_attrs
    # gc.collect()
    
    # 2. Build the user DataFrame (extracted from the interaction data)
    # NOTE(review): assumes interaction_df contains an "age" column — confirm
    # against load_track2_data's output schema.
    print("\n构建用户和视频DataFrame...")
    user_df = interaction_df[["user_id", "age"]].drop_duplicates(subset=["user_id"]).copy()
    # NOTE(review): user_df was just built with only user_id/age columns, so
    # "gender" can never be present and the default branch always runs —
    # confirm whether gender should be pulled from interaction_df instead.
    if "gender" not in user_df.columns:
        user_df["gender"] = 0  # default gender
    
    # 3. Build the video DataFrame (union of ids across the feature files)
    all_item_ids = set(audio_features.keys()) | set(video_features.keys()) | set(title_features.keys())
    video_df = pd.DataFrame({"item_id": list(all_item_ids)})
    if "category" in interaction_df.columns:
        # Take the first observed category per item.
        category_map = interaction_df.groupby("item_id")["category"].first().to_dict()
        video_df["category"] = video_df["item_id"].map(category_map)
    
    print(f"  初始用户数：{len(user_df)}")
    print(f"  初始视频数：{len(video_df)}")
    
    # 4. Data cleaning
    user_df = clean_user_data(user_df, interaction_df)
    video_df = clean_video_data(video_df, audio_features, video_features, title_features)
    interaction_df = clean_interaction_data(interaction_df, user_df, video_df)
    
    # 5. User feature preprocessing and encoding
    user_df = process_user_features(user_df, interaction_df)
    user_features, user_id_to_idx = encode_user_features(user_df)
    
    # 6. Multimodal feature extraction
    print("\n确定词汇表大小...")
    # Determine vocabulary size from the largest word id seen in the title
    # bag-of-words features.
    max_word_id = 0
    for bow in tqdm(title_features.values(), desc="统计词汇表", disable=len(title_features) < 10000):
        if isinstance(bow, dict):
            for word_id in bow.keys():
                try:
                    max_word_id = max(max_word_id, int(word_id))
                except:
                    # Non-numeric word ids are ignored.
                    pass
    vocab_size = max(max_word_id + 1, CONFIG["title_vocab_size"])  # +1 because ids start at 0
    print(f"  词汇表大小：{vocab_size}")
    
    visual_feat, video_id_to_idx = extract_visual_features_from_vectors(video_df, video_features)
    audio_feat = extract_audio_features_from_vectors(video_df, audio_features)
    text_feat = extract_text_features_from_vectors(video_df, title_features, vocab_size)
    
    # Release the raw feature dictionaries to save memory.
    print("\n释放原始特征数据以节省内存...")
    del audio_features, video_features, title_features
    gc.collect()
    
    # 7. Multimodal fusion
    video_multimodal_feat = fuse_multimodal_features(
        visual_feat, text_feat, audio_feat, video_id_to_idx, video_df
    )
    
    # Release the individual per-modality matrices.
    del visual_feat, audio_feat, text_feat
    gc.collect()
    
    # 8. Build the interaction graph
    adj_matrix = build_interaction_graph(interaction_df, user_id_to_idx, video_id_to_idx)
    
    # 9. Dataset split
    train_df, val_df, test_df = split_dataset(interaction_df, user_id_to_idx)
    
    # Before releasing interaction data, keep a user -> videos map for
    # negative sampling.
    print("\n构建用户-视频交互映射（用于负样本生成）...")
    all_interactions = pd.concat([train_df, val_df, test_df], ignore_index=True)
    user_video_interactions = all_interactions.groupby("user_id")["item_id"].agg(set).to_dict()
    
    # Release the raw interaction data.
    del interaction_df, all_interactions
    gc.collect()
    
    # 10. Negative sampling (train + val only; test is left as positives)
    all_video_ids = video_df["item_id"].unique()
    train_triplets = generate_neg_samples(train_df, all_video_ids, video_id_to_idx, 
                                         user_video_interactions=user_video_interactions)
    val_triplets = generate_neg_samples(val_df, all_video_ids, video_id_to_idx,
                                       user_video_interactions=user_video_interactions)
    
    del user_video_interactions
    gc.collect()
    
    # 11. Build DataLoaders
    train_loader = build_dataloader(train_triplets, shuffle=True)
    val_loader = build_dataloader(val_triplets, shuffle=False)
    
    # 12. Persist the preprocessing artefacts
    print("\n保存预处理结果...")
    os.makedirs(save_dir, exist_ok=True)
    
    # Feature matrices
    torch.save(user_features, os.path.join(save_dir, "user_features.pt"))
    torch.save(video_multimodal_feat, os.path.join(save_dir, "video_multimodal_feat.pt"))
    
    # Sparse adjacency matrix (stored as CSR for compactness)
    save_npz(os.path.join(save_dir, "adj_matrix.npz"), adj_matrix.tocsr())
    
    # BPR triplets and the held-out test set
    train_triplets.to_csv(os.path.join(save_dir, "train_triplets.csv"), index=False)
    val_triplets.to_csv(os.path.join(save_dir, "val_triplets.csv"), index=False)
    test_df.to_csv(os.path.join(save_dir, "test_df.csv"), index=False)
    
    # Id mappings
    with open(os.path.join(save_dir, "user_id_to_idx.pkl"), "wb") as f:
        pickle.dump(user_id_to_idx, f)
    with open(os.path.join(save_dir, "video_id_to_idx.pkl"), "wb") as f:
        pickle.dump(video_id_to_idx, f)
    
    # Configuration snapshot
    with open(os.path.join(save_dir, "config.pkl"), "wb") as f:
        pickle.dump(CONFIG, f)
    
    print("\n" + "=" * 60)
    print("预处理完成！")
    print("=" * 60)
    print(f"结果保存至：{save_dir}")
    print(f"  - user_features.pt: 用户特征矩阵")
    print(f"  - video_multimodal_feat.pt: 视频多模态特征矩阵")
    print(f"  - adj_matrix.npz: 交互图邻接矩阵（稀疏矩阵格式）")
    print(f"  - train_triplets.csv: 训练三元组")
    print(f"  - val_triplets.csv: 验证三元组")
    print(f"  - test_df.csv: 测试集")
    print(f"  - user_id_to_idx.pkl: 用户ID映射")
    print(f"  - video_id_to_idx.pkl: 视频ID映射")
    print(f"  - config.pkl: 配置参数")
    
    return {
        "user_features": user_features,
        "video_features": video_multimodal_feat,
        "adj_matrix": adj_matrix,
        "train_loader": train_loader,
        "val_loader": val_loader,
        "test_df": test_df,
        "user_id_to_idx": user_id_to_idx,
        "video_id_to_idx": video_id_to_idx
    }


if __name__ == "__main__":
    # Fix all RNG seeds so the pipeline is reproducible.
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed):
        seed_fn(42)
    
    # Run the full preprocessing pipeline.
    result = full_preprocess(
        data_dir="..",  # directory holding the raw data files
        save_dir="preprocessed_data",  # output directory for artefacts
    )

    if result:
        print("\n预处理成功完成！")
    else:
        print("\n预处理失败，请检查数据文件！")

