import json
from transformers import AutoTokenizer
from tqdm import tqdm


def filter_by_token_limit(input_file, output_file, max_tokens=3000):
    """Filter a JSON file, keeping only records whose "prompt" field fits
    within the token budget.

    :param input_file: path to the input JSON file (a list of dicts, each
        expected to carry a string "prompt" field)
    :param output_file: path where the filtered list is written as JSON
    :param max_tokens: maximum allowed token count per prompt
    :return: dict of filtering statistics (counts, min/max/avg tokens, and
        the raw per-record token counts)
    :raises OSError: if the input/output files cannot be read/written
    :raises json.JSONDecodeError: if the input file is not valid JSON
    """
    try:
        print("正在加载Qwen3-8B tokenizer...")
        tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-8B", trust_remote_code=True)
        print("tokenizer加载成功")
    except Exception as e:
        print(f"无法加载Qwen tokenizer: {e}")
        print("使用替代tokenizer...")
        tokenizer = None

        # Fall back to a secondary tokenizer before resorting to estimation.
        try:
            from transformers import BertTokenizer
            tokenizer = BertTokenizer.from_pretrained("bert-base-chinese")
            print("使用bert-base-chinese作为替代")
        except Exception:
            # Was a bare `except:` — that would also swallow SystemExit and
            # KeyboardInterrupt; narrow it to Exception.
            print("无法加载任何tokenizer，将使用近似估算")

    # Load the raw records.
    print(f"读取原始文件: {input_file}")
    with open(input_file, "r", encoding="utf-8") as f:
        data = json.load(f)

    print(f"找到 {len(data)} 条记录，开始过滤...")

    # Running statistics for the filtering pass.
    stats = {
        "total_original": len(data),
        "total_remaining": 0,
        "max_tokens": 0,
        "min_tokens": float("inf"),
        "avg_tokens": 0.0,
        "filtered_count": 0,
        "token_counts": []
    }
    total_tokens = 0
    filtered_data = []

    # Measure each record and decide whether to keep it.
    for item in tqdm(data, desc="处理进度"):
        prompt = item.get("prompt", "")
        if not isinstance(prompt, str):
            # Records without a string prompt are skipped entirely and do
            # not contribute to the token statistics.
            continue

        # Count tokens with the real tokenizer when available.
        if tokenizer:
            tokens = tokenizer.encode(prompt, add_special_tokens=False)
            token_count = len(tokens)
        else:
            # Rough estimate (Chinese text is typically 0.5-2 tokens per
            # character); cast to int so the stats stay integer-valued,
            # consistent with the tokenizer path.
            token_count = int(len(prompt) * 1.5)

        stats["token_counts"].append(token_count)
        total_tokens += token_count

        if token_count > stats["max_tokens"]:
            stats["max_tokens"] = token_count
        if token_count < stats["min_tokens"]:
            stats["min_tokens"] = token_count

        # Keep the record only if it fits the budget.
        if token_count <= max_tokens:
            filtered_data.append(item)
            stats["total_remaining"] += 1
        else:
            stats["filtered_count"] += 1

    # Average over the records actually measured — dividing by
    # total_original (as before) deflated the average whenever
    # non-string prompts were skipped.
    measured = len(stats["token_counts"])
    if measured > 0:
        stats["avg_tokens"] = round(total_tokens / measured, 2)
    else:
        # Nothing was measured: normalize min_tokens so the stats dict is
        # JSON-serializable instead of carrying float("inf").
        stats["min_tokens"] = 0

    # Persist the surviving records.
    print(f"保存过滤后的数据到: {output_file}")
    with open(output_file, "w", encoding="utf-8") as f:
        json.dump(filtered_data, f, ensure_ascii=False, indent=2)

    return stats


def print_stats(stats: dict):
    """Print a human-readable summary of the filtering statistics.

    :param stats: statistics dict as produced by ``filter_by_token_limit``;
        a falsy value (``None`` or ``{}``) is ignored.
    :return: None
    """
    if not stats:
        return

    total = stats['total_original']
    print("\n===== 数据过滤统计 =====")
    print(f"原始记录数: {total}")
    # Guard the percentage computation: an empty dataset previously raised
    # ZeroDivisionError here.
    if total > 0:
        print(
            f"保留记录数: {stats['total_remaining']} (占比: {stats['total_remaining'] / total * 100:.2f}%)")
        print(
            f"过滤记录数: {stats['filtered_count']} (占比: {stats['filtered_count'] / total * 100:.2f}%)")
    else:
        print(f"保留记录数: {stats['total_remaining']}")
        print(f"过滤记录数: {stats['filtered_count']}")
    print(f"Token最大值: {stats['max_tokens']}")
    print(f"Token最小值: {stats['min_tokens']}")
    print(f"Token平均值: {stats['avg_tokens']}")

    # Percentile snapshot of the token-count distribution.
    if stats['token_counts']:
        # sorted() copy — the previous in-place .sort() silently mutated
        # the caller's stats['token_counts'] list.
        counts = sorted(stats['token_counts'])
        print(f"Token分布: 10% = {counts[int(len(counts) * 0.1)]}, "
              f"25% = {counts[int(len(counts) * 0.25)]}, "
              f"50% = {counts[int(len(counts) * 0.5)]}, "
              f"75% = {counts[int(len(counts) * 0.75)]}, "
              f"90% = {counts[int(len(counts) * 0.9)]}")


if __name__ == "__main__":
    # Run configuration: source/destination paths and the token cap.
    source_path = "10-23.json"
    destination_path = "filtered_data1.json"
    token_cap = 2048

    # Filter the dataset and collect run statistics.
    run_stats = filter_by_token_limit(source_path, destination_path, token_cap)

    # Report the outcome.
    if run_stats:
        print_stats(run_stats)
    else:
        print("处理过程中发生错误")