import pandas as pd
import numpy as np
import os

def read_csv_with_encoding(file_path):
    """Read a CSV file, trying several encodings in priority order.

    Intended for mixed Chinese/Western data where the true encoding is
    unknown (UTF-8, GBK, GB2312, UTF-16, Latin-1 are tried in turn).

    Args:
        file_path: Path of the CSV file to read.

    Returns:
        pandas.DataFrame with the parsed file contents.

    Raises:
        Exception: Re-raises the last error if even the lenient
            fallback read fails.
    """
    # Candidate encodings, ordered by how likely they are for this data.
    encodings = ['utf-8', 'gbk', 'gb2312', 'utf-16', 'latin-1']

    for encoding in encodings:
        try:
            return pd.read_csv(file_path, encoding=encoding)
        except UnicodeDecodeError:
            # Wrong encoding guess — silently try the next candidate.
            continue
        except Exception as e:
            # Non-decoding failure (e.g. parser error): report and keep trying.
            print(f"使用{encoding}编码读取{file_path}时发生错误: {e}")
            continue

    # Last resort: decode as UTF-8 but drop undecodable bytes.
    # BUG FIX: pd.read_csv has no `errors` keyword — the original call
    # raised TypeError here. The correct parameter (pandas >= 1.3) is
    # `encoding_errors`.
    try:
        return pd.read_csv(file_path, encoding='utf-8', encoding_errors='ignore')
    except Exception as e:
        print(f"所有编码尝试失败，读取{file_path}时发生错误: {e}")
        raise

def clean_datasets(user_path, comments_path, movies_path, person_path, ratings_path, output_dir,
                   n_users=10000, random_state=42):
    """Clean the five datasets, keeping a random user sample and all related rows.

    Samples `n_users` distinct users, then keeps only the comments and
    ratings made by those users, the movies those comments/ratings refer
    to, and the persons linked to those movies. Results are written as
    UTF-8 CSVs into `output_dir`.

    Args:
        user_path: Users CSV; must contain a USER_MD5 column.
        comments_path: Comments CSV; filtered on USER_MD5, movie links
            read from MOVIE_ID if present.
        movies_path: Movies CSV; must contain a MOVIE_ID column.
        person_path: Persons CSV; must contain a PERSON_ID column.
        ratings_path: Ratings CSV; filtered on USER_MD5, movie links
            read from MOVIE_ID if present.
        output_dir: Output directory (created if missing).
        n_users: Number of distinct users to keep. Defaults to 10000,
            matching the original hard-coded behavior.
        random_state: Sampling seed. Defaults to 42, as before.

    Raises:
        ValueError: If there are fewer than `n_users` distinct user IDs.
    """
    os.makedirs(output_dir, exist_ok=True)

    # 1. Load the user dataset.
    print("正在读取用户数据集...")
    user_df = read_csv_with_encoding(user_path)

    # 2. Sample n_users distinct user IDs (reproducible via random_state).
    # BUG FIX: validate the number of *distinct* IDs, not raw rows —
    # a table with enough rows but duplicated USER_MD5 values would have
    # passed the old row-count check and then crashed in sample().
    unique_users = user_df['USER_MD5'].drop_duplicates()
    if len(unique_users) < n_users:
        raise ValueError(f"用户数量不足{n_users}，请检查数据集")
    print(f"正在随机选择{n_users}个用户...")
    selected_users = unique_users.sample(n=n_users, random_state=random_state).to_numpy()

    # 3. Keep only the sampled users.
    print("正在筛选用户数据集...")
    filtered_user = user_df[user_df['USER_MD5'].isin(selected_users)]

    # 4. Keep only comments written by the sampled users.
    print("正在处理评论数据集...")
    comments_df = read_csv_with_encoding(comments_path)
    filtered_comments = comments_df[comments_df['USER_MD5'].isin(selected_users)]

    # 5. Keep only ratings made by the sampled users.
    print("正在处理评分数据集...")
    ratings_df = read_csv_with_encoding(ratings_path)
    filtered_ratings = ratings_df[ratings_df['USER_MD5'].isin(selected_users)]

    # 6. Collect the movie IDs referenced by the kept comments/ratings.
    print("正在获取相关电影ID...")
    movie_ids_from_comments = (filtered_comments['MOVIE_ID'].unique()
                               if 'MOVIE_ID' in filtered_comments.columns else [])
    movie_ids_from_ratings = (filtered_ratings['MOVIE_ID'].unique()
                              if 'MOVIE_ID' in filtered_ratings.columns else [])
    # np.concatenate handles empty lists, so missing MOVIE_ID columns are safe.
    related_movie_ids = np.unique(np.concatenate([movie_ids_from_comments, movie_ids_from_ratings]))

    # 7. Keep only the referenced movies.
    print("正在筛选电影数据集...")
    movies_df = read_csv_with_encoding(movies_path)
    filtered_movies = movies_df[movies_df['MOVIE_ID'].isin(related_movie_ids)]

    # 8. Collect person IDs linked to the kept movies.
    # NOTE(review): assumes the movie table links people via a single
    # PERSON_ID column — confirm against the real schema.
    print("正在获取相关人物ID...")
    person_ids = filtered_movies['PERSON_ID'].unique() if 'PERSON_ID' in filtered_movies.columns else []

    # 9. Keep only the referenced persons.
    print("正在筛选人物数据集...")
    person_df = read_csv_with_encoding(person_path)
    filtered_person = person_df[person_df['PERSON_ID'].isin(person_ids)]

    # 10. Write everything back as UTF-8 for downstream tooling.
    print("正在保存清洗后的数据集...")
    outputs = {
        "user_filtered.csv": filtered_user,
        "comments_filtered.csv": filtered_comments,
        "movies_filtered.csv": filtered_movies,
        "person_filtered.csv": filtered_person,
        "ratings_filtered.csv": filtered_ratings,
    }
    for filename, frame in outputs.items():
        # os.path.join keeps the paths portable across platforms.
        frame.to_csv(os.path.join(output_dir, filename), index=False, encoding='utf-8')

    print("数据清洗完成！")
    print(f"清洗后的数据量：")
    print(f"用户：{len(filtered_user)}")
    print(f"评论：{len(filtered_comments)}")
    print(f"电影：{len(filtered_movies)}")
    print(f"人物：{len(filtered_person)}")
    print(f"评分：{len(filtered_ratings)}")

# Example usage
if __name__ == "__main__":
    # Input CSV locations — adjust these to the actual environment.
    user_path = "user.csv"
    comments_path = "comments.csv"
    movies_path = "movies.csv"
    person_path = "person.csv"
    ratings_path = "ratings.csv"

    # Directory that receives the filtered CSV files.
    output_dir = "filtered_data"

    # Run the cleaning pipeline.
    clean_datasets(
        user_path,
        comments_path,
        movies_path,
        person_path,
        ratings_path,
        output_dir,
    )
