# scripts/run_cleaning.py
import argparse
import json
import os
import sys
from pathlib import Path

# Add the project root directory to sys.path so that `src` can be imported
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

from src.llm_generation.vllm_client import VLLMClient
from src.llm_generation.cleaning_generator import DataCleaningGenerator
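
# Example invocation (the model path below is illustrative, not one shipped with
# the repo):
#   python scripts/run_cleaning.py \
#       --data_root data/original \
#       --output_file cleaning_report.jsonl \
#       --model /path/to/local/model \
#       --tp_size 2
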
def scan_dataset(root_dir):
    """
    Scan the data/original/${category}/${sequence} directory structure.
    Returns a list of entries, each carrying the absolute path of a sequence.
    """
    entries = []
    base_path = Path(root_dir).resolve()  # resolve to an absolute path

    if not base_path.exists():
        print(f"Error: Path {root_dir} does not exist.")
        return []

    print(f"Scanning directory: {base_path} ...")

    # Collect all category folders
    categories = [d for d in base_path.iterdir() if d.is_dir()]

    for cat_dir in categories:
        category_name = cat_dir.name
        # Collect all sequence folders under this category
        sequences = [s for s in cat_dir.iterdir() if s.is_dir()]
        for seq_dir in sequences:
            # Store the absolute path directly to avoid path-joining mistakes
            entries.append({
                "category": category_name,
                "sequence_name": seq_dir.name,
                "sequence_abs_path": str(seq_dir)  # key change: store the absolute path
            })

    print(f"Found {len(entries)} sequences across {len(categories)} categories.")
    return entries
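
# For reference, each entry produced by scan_dataset has the following shape
# (the concrete values are made up for illustration):
#   {
#       "category": "cooking",
#       "sequence_name": "seq_0001",
#       "sequence_abs_path": "/abs/path/to/data/original/cooking/seq_0001"
#   }
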
def save_jsonl(data, path):
    # Create parent directories only when the path has one (os.makedirs('') would fail
    # for a bare filename such as the default output_file)
    dir_name = os.path.dirname(path)
    if dir_name:
        os.makedirs(dir_name, exist_ok=True)
    with open(path, 'w', encoding='utf-8') as f:
        for item in data:
            f.write(json.dumps(item, ensure_ascii=False) + '\n')
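
# The report is written as JSON Lines, one record per sequence. Judging from how
# main() summarizes it below, each record carries at least a 'status' field
# (e.g. 'error_no_images') and an 'is_match' flag; any other fields come from
# DataCleaningGenerator and are not spelled out here.
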
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_root", type=str, required=True, help="Path to 'data/original'")
    parser.add_argument("--output_file", type=str, default="cleaning_report.jsonl")
    parser.add_argument("--model", type=str, required=True, help="Local model path")
    parser.add_argument("--tp_size", type=int, default=1)
    args = parser.parse_args()

    # 1. Scan the dataset
    entries = scan_dataset(args.data_root)
    if not entries:
        print("No entries found. Exiting.")
        return

    # 2. Initialize the vLLM client
    client = VLLMClient(
        model_path=args.model,
        tensor_parallel_size=args.tp_size,
        gpu_memory_utilization=0.9
    )
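    # tensor_parallel_size and gpu_memory_utilization mirror the standard vLLM engine
    # arguments (number of GPUs to shard the model across, and the fraction of GPU
    # memory vLLM may claim); VLLMClient is assumed to forward them to vLLM unchanged.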

    # 3. Initialize the generator
    # The image_root argument is effectively unused now that entries carry absolute
    # paths, but it is kept (passed as None) for interface compatibility and logging.
    generator = DataCleaningGenerator(
        client=client,
        image_root=None,  # no longer used for path joining
        model_name=args.model,
        sample_k=4
    )

    # 4. Run the cleaning pass
    results = generator.process_batch(entries)

    # 5. Save the results
    save_jsonl(results, args.output_file)

    # 6. Print a short summary
    # Skip 'error_no_images' records; only count sequences that were actually processed
    valid_results = [r for r in results if r.get('status') != 'error_no_images']
    mismatches = [r for r in valid_results if r.get('is_match') is False]

    print("\nCleaning finished!")
    print(f"Total sequences scanned: {len(results)}")
    print(f"Successfully processed: {len(valid_results)}")
    print(f"Suspected mismatches: {len(mismatches)}")
    print(f"Detailed report saved to: {args.output_file}")

    if mismatches:
        print("\nExample mismatched item:")
        print(json.dumps(mismatches[0], indent=2, ensure_ascii=False))


if __name__ == "__main__":
    main()