import pandas as pd
from utils import (
    log_message
)



def create_train_test_data(df_data, random_state=6):
    """Build train/test CSVs from a movie-ratings DataFrame.

    Keeps users with at least 10 ratings, samples 1000 of them, takes each
    sampled user's 10 most recent ratings, and splits those into the single
    most recent rating (test) and the remaining 9 (train). Both splits are
    written to CSV under ../data/llm-pretrain-data/.

    Args:
        df_data: ratings DataFrame; must contain at least the columns
            'userId' and 'timestamp' (full schema not verified here —
            presumably the MovieLens ratings-with-titles export).
        random_state: seed for user sampling so repeated runs select the
            same users.

    Raises:
        ValueError: if any sampled user ends up with fewer than 10 rows.
    """
    log_message(f"以random_state为{random_state}开始构造训练集和测试集")
    # Keep only users that have rated at least 10 movies.
    enough_movies_df = df_data.groupby('userId').filter(lambda x: len(x) >= 10)

    # BUG FIX: the original re-assigned random_state = 42 here, which
    # silently discarded the caller-supplied seed; the parameter is now
    # honored as the function signature advertises.

    # Sample 1000 distinct users (seeded, so reruns pick the same users).
    selected_users = enough_movies_df['userId'].drop_duplicates().sample(n=1000, random_state=random_state)

    # For each sampled user, take their 10 MOST RECENT ratings.
    # BUG FIX: the original applied .sample(n=10) after the timestamp sort,
    # which picked 10 *random* ratings per user and defeated the sort;
    # head(10) on the newest-first ordering yields the latest 10 as intended.
    latest_user_ratings = (
        enough_movies_df[enough_movies_df['userId'].isin(selected_users)]
        .sort_values(by=['userId', 'timestamp'], ascending=[True, False])  # per user, newest first
        .groupby('userId')
        .head(10)
    )

    # Verify every selected user really contributed 10 rows.
    # BUG FIX: the original wrote this comparison as a bare expression, so
    # the result was discarded and the check never stopped the program;
    # now it raises explicitly on violation.
    if latest_user_ratings.groupby('userId').size().min() != 10:
        raise ValueError("some sampled users have fewer than 10 ratings")

    # Within each user's 10 newest ratings (still newest-first), the single
    # most recent one becomes the test row; the other 9 become training rows.
    user_set = latest_user_ratings.sort_values(by=['userId', 'timestamp'], ascending=[True, False])
    test_set = user_set.groupby('userId').head(1)
    train_set = user_set.drop(test_set.index)

    # Persist the split; paths assume the project's data layout — TODO confirm.
    train_set.to_csv(f"../data/llm-pretrain-data/train_set_random_state_{random_state}_dataset_1k.csv", index=False)
    test_set.to_csv(f"../data/llm-pretrain-data/test_set_random_state_{random_state}_dataset_1k.csv", index=False)
    log_message("构造完成")


def create():
    """Load the ratings CSV and launch the train/test split build."""
    log_message("加载数据中")
    # Pull in the movie-ratings source file.
    ratings_frame = pd.read_csv("../data/llm-pretrain-data/ratings_title.csv", sep=",", low_memory=False)
    log_message("数据加载完成")
    create_train_test_data(df_data=ratings_frame, random_state=42)

# Guard the entry point so importing this module does not trigger the
# data build as a side effect (original called create() unconditionally).
if __name__ == "__main__":
    create()
