import glob
import os


def deduplicate_by_bvid_per_file(input_dir, output_dir):
    """Deduplicate each file in *input_dir* by bvid, writing results to *output_dir*.

    For every regular file directly under *input_dir*, keeps only the first
    line seen for each bvid — the first whitespace-separated token starting
    with ``"BV"`` — and writes the surviving lines (in original order) to
    ``<stem>_set.txt`` under *output_dir*. Lines with no BV-prefixed token
    are dropped. Per-file I/O errors are reported and do not stop the run.
    """
    # Create the output directory if it does not exist.
    os.makedirs(output_dir, exist_ok=True)

    # All entries directly under input_dir (non-recursive).
    file_pattern = os.path.join(input_dir, "*")
    files = glob.glob(file_pattern)

    for file_path in files:
        # Process regular files only; skip subdirectories.
        if not os.path.isfile(file_path):
            continue

        # Dict keyed by bvid: guarantees uniqueness and (since it is a
        # regular dict) preserves first-seen order of the kept lines.
        bvid_data = {}
        file_name = os.path.basename(file_path)
        output_file = os.path.join(
            output_dir, f"{os.path.splitext(file_name)[0]}_set.txt"
        )

        try:
            with open(file_path, "r", encoding="utf-8") as f:
                for line in f:
                    line = line.strip()
                    if not line:  # skip blank lines
                        continue

                    # The bvid is the FIRST whitespace-separated token that
                    # starts with "BV". (The original code only examined
                    # parts[0], silently dropping lines where the bvid is
                    # not the leading token — contrary to its own comment.)
                    bvid = next(
                        (tok for tok in line.split() if tok.startswith("BV")),
                        None,
                    )
                    if bvid is not None:
                        # setdefault keeps only the first line per bvid.
                        bvid_data.setdefault(bvid, line)

            # Write this file's deduplicated lines to its own output file.
            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(kept + "\n" for kept in bvid_data.values())

            print(f"文件 {file_name} 去重完成，保留 {len(bvid_data)} 条唯一记录")
            print(f"结果已保存到 {output_file}")

        except (OSError, UnicodeError) as e:
            # Narrowed from bare `except Exception`: only I/O and decoding
            # failures are expected here; programming errors should surface.
            print(f"处理文件 {file_path} 时出错: {e}")


def main():
    """Entry point: deduplicate every file under ./data into ./set."""
    # Inputs come from the data folder next to the script; the
    # deduplicated per-file results land in the set folder.
    deduplicate_by_bvid_per_file("./data", "./set")


# Run the deduplication only when this file is executed as a script,
# not when it is imported as a module.
if __name__ == "__main__":
    main()
