| import os |
| import json |
| import re |
| import shutil |
| from collections import defaultdict |
|
|
| |
|
|
| |
# Log file produced by the image-copy step; each "[FAIL]" entry records an
# image path followed by "<filename> -> ID: <id>" lines for affected questions.
LOG_FILE = 'copy_errors.log'



# Root directory scanned (recursively) for the question .jsonl files to clean.
QUESTION_ROOT = 'data/questions'




# Optional filters: when set, only log entries whose category/sequence match
# are processed. Leave as None to process every entry in the log.
TARGET_CATEGORY = None
TARGET_SEQUENCE = None
|
|
| |
|
|
def parse_log_and_get_ids(log_file, target_cat=None, target_seq=None):
    """Parse the copy-error log and collect {filename: {question IDs}} to delete.

    The log is expected to contain blocks of the form::

        [FAIL] 图片路径: data/raw_images/<category>/<sequence>/images/...
        <filename> -> ID: <id>
        ...

    Args:
        log_file: Path to the error log produced by the copy step.
        target_cat: If given, keep only blocks whose category matches.
        target_seq: If given, keep only blocks whose sequence matches.

    Returns:
        dict mapping jsonl filename -> set of question ID strings to remove.
        Returns an empty dict when the log file does not exist.
    """
    if not os.path.exists(log_file):
        print(f"[Error] 找不到日志文件: {log_file}")
        return {}

    print(f"[*] 正在解析日志: {log_file} ...")
    if target_cat:
        print(f"  -> 过滤条件 Category: {target_cat}")
    if target_seq:
        print(f"  -> 过滤条件 Sequence: {target_seq}")

    files_to_clean = defaultdict(set)

    # Extracts the <category> and <sequence> path components of a failed image.
    path_pattern = re.compile(r'data/raw_images/([^/]+)/([^/]+)/images/')

    current_category = None
    current_sequence = None
    # Whether subsequent "-> ID:" lines belong to a block matching the filters.
    is_target_block = False

    with open(log_file, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip()

            if line.startswith("[FAIL] 图片路径:"):
                path_str = line.split(": ", 1)[1]
                match = path_pattern.search(path_str)

                if match:
                    current_category = match.group(1)
                    current_sequence = match.group(2)

                    cat_match = (target_cat is None) or (current_category == target_cat)
                    # BUG FIX: previously compared against the undefined name
                    # `target_sequence`, raising NameError whenever a sequence
                    # filter was supplied. Compare against the parameter.
                    seq_match = (target_seq is None) or (current_sequence == target_seq)

                    is_target_block = cat_match and seq_match
                else:
                    # Unparseable path: ignore IDs until the next [FAIL] header.
                    is_target_block = False

            elif "-> ID:" in line:
                if is_target_block:
                    try:
                        parts = line.split("-> ID:")
                        filename = parts[0].strip()
                        q_id = parts[1].strip()

                        files_to_clean[filename].add(q_id)
                    except IndexError:
                        # Malformed "-> ID:" line — skip it.
                        pass

    total_ids = sum(len(ids) for ids in files_to_clean.values())
    print(f"[*] 解析完成。共发现 {len(files_to_clean)} 个文件中的 {total_ids} 个问题需要删除。")
    return files_to_clean
|
|
def clean_jsonl_files(question_root, files_to_clean):
    """Walk *question_root* and strip the listed question IDs from matching files.

    *files_to_clean* maps a bare jsonl filename to the set of question IDs that
    must be deleted from it. Prints a summary of how many files were modified
    and how many lines were removed in total.
    """
    if not files_to_clean:
        print("[*] 没有需要删除的内容。")
        return

    print(f"[*] 开始扫描目录 {question_root} 并执行删除操作...")

    touched_files = 0
    total_removed = 0

    for dirpath, _dirnames, filenames in os.walk(question_root):
        for name in filenames:
            # Only files explicitly named in the cleanup map are touched.
            if name not in files_to_clean:
                continue

            removed = process_single_file(
                os.path.join(dirpath, name), files_to_clean[name]
            )
            if removed > 0:
                touched_files += 1
                total_removed += removed

    print("\n" + "=" * 30)
    print("清理任务完成 Summary:")
    print(f"修改文件数: {touched_files}")
    print(f"删除问题数: {total_removed}")
    print("=" * 30)
|
|
def process_single_file(file_path, ids_to_remove):
    """Rewrite *file_path* (JSONL), dropping lines whose 'id' is in *ids_to_remove*.

    Surviving lines are streamed into a temporary file; the original is only
    replaced when at least one line was removed. Blank lines are always
    skipped. Returns the number of removed lines, or 0 on any error.
    """
    staging_path = file_path + '.tmp'
    dropped = 0

    def _marked_for_removal(raw):
        # Lines that fail to parse as JSON are kept untouched.
        try:
            record = json.loads(raw)
        except json.JSONDecodeError:
            return False
        return record.get('id') in ids_to_remove

    try:
        with open(file_path, 'r', encoding='utf-8') as src, \
             open(staging_path, 'w', encoding='utf-8') as dst:

            for raw in src:
                raw = raw.strip()
                if not raw:
                    continue

                if _marked_for_removal(raw):
                    dropped += 1
                else:
                    dst.write(raw + '\n')

        if dropped > 0:
            shutil.move(staging_path, file_path)
            print(f"  [Cleaned] {os.path.basename(file_path)}: 删除了 {dropped} 行")
        else:
            # Nothing removed — discard the staging copy, keep the original.
            os.remove(staging_path)

    except Exception as e:
        print(f"[!] 处理文件 {file_path} 时出错: {e}")
        if os.path.exists(staging_path):
            os.remove(staging_path)
        return 0

    return dropped
|
|
if __name__ == "__main__":
    # Step 1: determine which IDs must be purged, honoring the optional filters.
    pending = parse_log_and_get_ids(LOG_FILE, TARGET_CATEGORY, TARGET_SEQUENCE)

    if pending:
        # Step 2: destructive operation — require explicit confirmation first.
        answer = input(f"警告: 即将从原始 jsonl 文件中永久删除数据。\n输入 'yes' 确认执行: ")
        if answer.lower() != 'yes':
            print("[*] 操作已取消。")
        else:
            clean_jsonl_files(QUESTION_ROOT, pending)
|