# interleaved-umm/scripts/remove.py
import os
import json
import re
import shutil
from collections import defaultdict
# ===================== Configuration =====================
# 1. Path to the error log file
LOG_FILE = 'copy_errors.log'
# 2. Root directory of the JSONL files (the script looks here for the jsonl files to modify)
QUESTION_ROOT = 'data/questions'
# 3. Filter conditions (optional)
# To delete only questions from a specific category or sequence, set the value here as a string.
# To process every error recorded in the log, leave it as None.
TARGET_CATEGORY = None  # e.g. 'bowl' or None
TARGET_SEQUENCE = None  # e.g. '70_6141_14002' or None
# ===================================================
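# The parser below expects copy_errors.log to contain blocks shaped roughly like the
# following (a sketch reconstructed from the parsing logic; the exact path and IDs are
# illustrative, and the Chinese "[FAIL] 图片路径:" marker matches the log verbatim):
#
#   [FAIL] 图片路径: data/raw_images/bowl/70_6141_14002/images/frame_000123.jpg
#       train_2.jsonl -> ID:task3_209_22099_44906_71
#       train_2.jsonl -> ID:task3_209_22099_44906_72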
def parse_log_and_get_ids(log_file, target_cat=None, target_seq=None):
"""
解析日志文件,获取需要删除的 {文件名: {ID集合}}
支持按 category 和 sequence 过滤
"""
if not os.path.exists(log_file):
print(f"[Error] 找不到日志文件: {log_file}")
return {}
print(f"[*] 正在解析日志: {log_file} ...")
if target_cat:
print(f" -> 过滤条件 Category: {target_cat}")
if target_seq:
print(f" -> 过滤条件 Sequence: {target_seq}")
    # Result structure: { 'train_2.jsonl': {'task3_id_1', 'task3_id_2'}, ... }
    files_to_clean = defaultdict(set)
    # Regex to extract the category and sequence from the image path.
    # Assumed path layout: .../raw_images/{category}/{sequence}/images/...
path_pattern = re.compile(r'data/raw_images/([^/]+)/([^/]+)/images/')
current_category = None
current_sequence = None
    is_target_block = False  # whether the current error block matches the filter conditions
with open(log_file, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
            # 1. Detect the start of an error block (the line carrying the image path).
            #    The marker text is left in Chinese because it must match the log file verbatim.
            if line.startswith("[FAIL] 图片路径:"):
path_str = line.split(": ", 1)[1]
match = path_pattern.search(path_str)
if match:
current_category = match.group(1)
current_sequence = match.group(2)
                    # Check whether this block matches the user's filter conditions
                    cat_match = (target_cat is None) or (current_category == target_cat)
                    seq_match = (target_seq is None) or (current_sequence == target_seq)
                    is_target_block = cat_match and seq_match
                else:
                    # If the path cannot be parsed, skip this block to be safe
                    is_target_block = False
            # 2. Extract the IDs of the affected questions.
            # Expected line format: train_2.jsonl -> ID:task3_209_22099_44906_71
elif "-> ID:" in line:
if is_target_block:
try:
                        # Split into filename and question ID
parts = line.split("-> ID:")
filename = parts[0].strip()
q_id = parts[1].strip()
files_to_clean[filename].add(q_id)
except IndexError:
pass
total_ids = sum(len(ids) for ids in files_to_clean.values())
print(f"[*] 解析完成。共发现 {len(files_to_clean)} 个文件中的 {total_ids} 个问题需要删除。")
return files_to_clean
def clean_jsonl_files(question_root, files_to_clean):
"""
遍历目录,找到对应的 jsonl 文件并删除指定 ID 的行
"""
if not files_to_clean:
print("[*] 没有需要删除的内容。")
return
print(f"[*] 开始扫描目录 {question_root} 并执行删除操作...")
modified_count = 0
deleted_lines_count = 0
# 遍历所有文件
for root, dirs, files in os.walk(question_root):
for file in files:
            # This file is on the cleanup list
            if file in files_to_clean:
                file_path = os.path.join(root, file)
                ids_to_remove = files_to_clean[file]
                # Run the cleanup on this file
removed = process_single_file(file_path, ids_to_remove)
if removed > 0:
modified_count += 1
deleted_lines_count += removed
                # Note: we intentionally do NOT pop the filename from files_to_clean here.
                # Different directories may contain same-named files (e.g. task1/train.jsonl
                # and task2/train.jsonl) while the log only records "train.jsonl", so every
                # file with that name must be checked against the ID set.
print("\n" + "="*30)
print("清理任务完成 Summary:")
print(f"修改文件数: {modified_count}")
print(f"删除问题数: {deleted_lines_count}")
print("="*30)
def process_single_file(file_path, ids_to_remove):
"""
读取文件,过滤掉在 ids_to_remove 中的行,重写文件
"""
temp_file = file_path + '.tmp'
removed_count = 0
try:
with open(file_path, 'r', encoding='utf-8') as f_in, \
open(temp_file, 'w', encoding='utf-8') as f_out:
for line in f_in:
line = line.strip()
if not line:
continue
should_delete = False
try:
data = json.loads(line)
if data.get('id') in ids_to_remove:
should_delete = True
except json.JSONDecodeError:
pass
if should_delete:
removed_count += 1
else:
f_out.write(line + '\n')
        # If anything was deleted, replace the original file with the rewritten one
        if removed_count > 0:
            shutil.move(temp_file, file_path)
            print(f"  [Cleaned] {os.path.basename(file_path)}: removed {removed_count} lines")
        else:
            # No changes; discard the temporary file
            os.remove(temp_file)
except Exception as e:
print(f"[!] 处理文件 {file_path} 时出错: {e}")
if os.path.exists(temp_file):
os.remove(temp_file)
return 0
return removed_count
if __name__ == "__main__":
    # 1. Build the deletion map from the log
clean_map = parse_log_and_get_ids(LOG_FILE, TARGET_CATEGORY, TARGET_SEQUENCE)
    # 2. Perform the deletion
    if clean_map:
        # Ask for confirmation to avoid accidental deletion
        confirm = input("WARNING: this will permanently delete entries from the original jsonl files.\nType 'yes' to proceed: ")
if confirm.lower() == 'yes':
clean_jsonl_files(QUESTION_ROOT, clean_map)
else:
print("[*] 操作已取消。")