import v2.data_loader_explorer as data_loader_explorer


def clean_data(train_df, test_df):
    """Clean the train/test DataFrames and return cleaned copies.

    Pipeline: missing-value handling -> outlier removal (train only) ->
    dtype conversion -> de-duplication. The test set deliberately skips
    outlier removal so every test row survives for prediction.

    Args:
        train_df: training frame; 'finish'/'like' labels expected in {0, 1}.
        test_df: test frame; labels are -1 placeholders and are left as-is.

    Returns:
        (train_clean, test_clean): cleaned copies; inputs are not mutated.
    """

    print("开始数据清洗...")

    # 1. Missing-value handling
    def handle_missing_values(df, is_train=True):
        """Replace the -1 missing-value sentinel and drop invalid labels."""
        df_clean = df.copy()

        # In this dataset -1 encodes unknown/missing in these columns.
        missing_cols = ['user_city', 'author_id', 'item_city', 'music_id', 'channel']

        print("处理前缺失值统计 (-1的数量):")
        for col in missing_cols:
            if col in df_clean.columns:
                missing_count = (df_clean[col] == -1).sum()
                print(f"  {col}: {missing_count} ({missing_count / len(df_clean) * 100:.2f}%)")

        # Map the -1 sentinel to a dedicated "missing" category (-999).
        # Guard on column presence — the reporting loop above already does,
        # and an unguarded lookup would raise KeyError on a partial frame.
        # NOTE: 'channel' is reported above but intentionally not remapped.
        for col in ['user_city', 'author_id', 'item_city', 'music_id']:
            if col in df_clean.columns:
                df_clean[col] = df_clean[col].replace(-1, -999)

        # Test-set labels are -1 by design, so only the train set is filtered.
        if is_train:
            # Train labels must be binary; anything else is an invalid row.
            df_clean = df_clean[df_clean['finish'].isin([0, 1])]
            df_clean = df_clean[df_clean['like'].isin([0, 1])]

        return df_clean

    # 2. Outlier handling
    def handle_outliers(df):
        """Drop rows with implausible video durations or extreme timestamps."""
        df_clean = df.copy()

        original_len = len(df_clean)

        print(f"原始数据量: {original_len}")

        # Zero-duration videos are assumed invalid.
        df_clean = df_clean[df_clean['video_duration'] > 0]
        print(f"删除时长为0的记录后: {len(df_clean)}")

        # Durations above 10 minutes are treated as outliers.
        duration_threshold = 600  # seconds
        df_clean = df_clean[df_clean['video_duration'] <= duration_threshold]
        print(f"删除时长超过{duration_threshold}秒的记录后: {len(df_clean)}")

        # creat_time is an anonymized timestamp; trim only the 0.1% tails.
        time_threshold_low = df_clean['creat_time'].quantile(0.001)
        time_threshold_high = df_clean['creat_time'].quantile(0.999)
        df_clean = df_clean[
            (df_clean['creat_time'] >= time_threshold_low) &
            (df_clean['creat_time'] <= time_threshold_high)
        ]
        print(f"删除时间戳异常值后: {len(df_clean)}")

        removed_count = original_len - len(df_clean)
        print(f"共删除异常记录: {removed_count} ({removed_count / original_len * 100:.2f}%)")

        return df_clean

    # 3. Dtype conversion
    def convert_data_types(df, is_train=True):
        """Convert labels to bool and ID-like columns to category dtype."""
        df_clean = df.copy()

        if is_train:
            df_clean['finish'] = df_clean['finish'].astype(bool)
            df_clean['like'] = df_clean['like'].astype(bool)

        # category dtype cuts memory for high-cardinality ID columns.
        categorical_cols = ['uid', 'user_city', 'item_id', 'author_id',
                            'item_city', 'channel', 'music_id', 'did']
        for col in categorical_cols:
            if col in df_clean.columns:
                df_clean[col] = df_clean[col].astype('category')

        return df_clean

    # 4. De-duplication
    def remove_duplicates(df):
        """Keep only the first record per (uid, item_id) interaction pair."""
        original_len = len(df)

        df_clean = df.drop_duplicates(subset=['uid', 'item_id'], keep='first')

        duplicate_count = original_len - len(df_clean)
        print(f"去除重复记录: {duplicate_count} ({duplicate_count / original_len * 100:.2f}%)")

        return df_clean

    # Run the cleaning pipeline.
    print("\n--- 训练集清洗 ---")
    train_clean = handle_missing_values(train_df, is_train=True)
    train_clean = handle_outliers(train_clean)
    train_clean = convert_data_types(train_clean, is_train=True)
    train_clean = remove_duplicates(train_clean)

    print("\n--- 测试集清洗 ---")
    test_clean = handle_missing_values(test_df, is_train=False)
    # Outlier filtering is skipped so every test row remains predictable.
    test_clean = convert_data_types(test_clean, is_train=False)
    test_clean = remove_duplicates(test_clean)

    print("\n清洗完成!")
    print(f"训练集: {len(train_df)} -> {len(train_clean)} 条记录")
    print(f"测试集: {len(test_df)} -> {len(test_clean)} 条记录")

    return train_clean, test_clean




def main():
    """Run the full preprocessing flow: load, explore, clean, report, save."""
    print("开始数据预处理与清洗流程...")

    # 1. Load raw tables plus the auxiliary feature sets.
    (train_df, test_df, face_attrs, audio_features,
     title_features, video_features) = data_loader_explorer.load_and_explore_data()

    # Profile both raw splits before any cleaning touches them.
    print("\n正在探索原始数据...")
    for frame, label in ((train_df, "原始训练集"), (test_df, "原始测试集")):
        data_loader_explorer.detailed_data_exploration(frame, label)

    # 2. Clean both splits in one pass.
    train_clean, test_clean = clean_data(train_df, test_df)

    # 3. Summarize post-cleaning data quality.
    quality_report = data_loader_explorer.generate_data_quality_report(
        train_clean, test_clean, face_attrs, audio_features, title_features, video_features
    )

    banner = "=" * 50
    print("\n" + banner)
    print("数据预处理与清洗完成!")
    print(banner)

    # Persist the cleaned splits as CSV artifacts (optional).
    train_clean.to_csv('cleaned_train_data.csv', index=False)
    test_clean.to_csv('cleaned_test_data.csv', index=False)

    return train_clean, test_clean, quality_report


# Script entry point: run the full preprocessing pipeline and keep the
# results in module globals for interactive inspection.
if __name__ == "__main__":
    train_clean, test_clean, quality_report = main()