import os
import json
from transformers import AutoTokenizer
import argparse

# Set up command-line arguments.
# Parsed BEFORE the tokenizer is loaded so that a bad invocation fails fast
# instead of after downloading a 32B model's tokenizer from the hub.
parser = argparse.ArgumentParser(description="过滤 JSONL 文件中的数据")
parser.add_argument("--input_path", '-i', type=str, required=True, help="输入 JSONL 文件路径")
parser.add_argument("--min_token_count", '-min', type=int, default=50, help="最小 token 数")
parser.add_argument("--max_token_count", '-max', type=int, default=1000, help="最大 token 数")
parser.add_argument("--column", '-c', type=str, default="input", help="要过滤的列名")
args = parser.parse_args()

# Load the Qwen2.5-32B tokenizer (network/disk I/O via the Hugging Face hub).
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-32B")

# Unpack frequently used arguments into locals.
input_path = args.input_path
min_token_count = args.min_token_count
max_token_count = args.max_token_count

base_dir = os.path.dirname(input_path)
filename = os.path.basename(input_path)
# Derive the output-file prefix from the input file name: drop the extension,
# then strip an optional '_all' marker.
# BUG FIX: the original used filename[:4] (first four characters only), which
# made the subsequent .replace('_all', '') dead code and produced truncated,
# meaningless prefixes for most file names.
file_prefix = os.path.splitext(filename)[0]
file_prefix = file_prefix.replace('_all', '')

# Write to temporary files first; they are renamed to their final names only
# after the full pass completes.
tmp_filtered_path = os.path.join(base_dir, f'{file_prefix}_filtered_tmp.jsonl')
tmp_remain_path = os.path.join(base_dir, f'{file_prefix}_remain_tmp.jsonl')

total_count = 0     # successfully parsed JSON lines
filtered_count = 0  # lines whose token count falls in [min, max]
remain_count = 0    # other kept lines (token count <= 4000)

with open(input_path, 'r', encoding='utf-8') as infile, \
     open(tmp_filtered_path, 'w', encoding='utf-8') as filtered_file, \
     open(tmp_remain_path, 'w', encoding='utf-8') as remain_file:
    for line in infile:
        try:
            data = json.loads(line)
        except json.JSONDecodeError:
            # Best-effort: skip malformed lines instead of aborting the run.
            continue
        total_count += 1
        if args.column == "all":
            # Concatenate the three standard instruction-tuning fields.
            # NOTE(review): unlike the single-column branch below, this text
            # is tokenized without stripping whitespace — confirm intentional.
            code = data.get('instruction', '') + data.get('input', '') + data.get('output', '')
        elif args.column in data:
            # Strip spaces and newlines before token counting.
            # BUG FIX: the original replaced the literal two-character
            # sequence "\\n" (backslash + n), which never matches the real
            # newline characters present in JSON-decoded text.
            code = data[args.column].replace(" ", "").replace("\n", "")
        else:
            # Missing column aborts the pass; partially written tmp files
            # are still renamed afterwards (pre-existing behavior).
            print(f"Error: Column '{args.column}' not found in data, break.")
            break
        token_ids = tokenizer.encode(str(code), add_special_tokens=False)
        token_count = len(token_ids)
        # Keep rows in the target token range that still have substance
        # (more than 30 non-space, non-newline characters).
        if min_token_count <= token_count <= max_token_count and len(code.replace(" ", "").replace("\n", "")) > 30:
            filtered_file.write(json.dumps(data, ensure_ascii=False) + '\n')
            filtered_count += 1
        elif token_count <= 4000:
            # Everything else up to 4000 tokens goes to the "remain" file;
            # longer rows are dropped entirely.
            remain_file.write(json.dumps(data, ensure_ascii=False) + '\n')
            remain_count += 1

# Build the final file names and move the temporary outputs into place.
# The default column ("input") is omitted from the name; any other column is
# embedded so runs over different columns do not overwrite each other.
name_stem = file_prefix if args.column == "input" else f'{file_prefix}_{args.column}'
filtered_path = os.path.join(base_dir, f'{name_stem}_filtered_{max_token_count}.jsonl')
remain_path = os.path.join(base_dir, f'{name_stem}_remain_{max_token_count}.jsonl')
os.rename(tmp_filtered_path, filtered_path)
os.rename(tmp_remain_path, remain_path)

# Print a summary: line totals and the generated file paths.
print(f'原始条数: {total_count}')
print(f'过滤后token数在{min_token_count}-{max_token_count}之间: {filtered_count}')
print(f'其余: {remain_count}')
print(f'已生成文件: {filtered_path}')
print(f'已生成文件: {remain_path}')