import json
from functools import lru_cache

import tiktoken  # 需要先安装: pip install tiktoken


@lru_cache(maxsize=None)
def _get_encoding(encoding_name):
    """Return the tiktoken encoding for *encoding_name*, cached.

    Constructing an encoding is relatively expensive; caching avoids
    rebuilding it on every call when counting tokens record-by-record
    in a loop.
    """
    return tiktoken.get_encoding(encoding_name)


def count_tokens(text, encoding_name="cl100k_base"):
    """Count the number of tokens in *text* using tiktoken.

    Args:
        text: The string to tokenize.
        encoding_name: Name of the tiktoken encoding to use
            (default "cl100k_base").

    Returns:
        int: the number of tokens the encoding produces for *text*.
    """
    return len(_get_encoding(encoding_name).encode(text))


def analyze_token_counts(json_file):
    """Analyze the token-count distribution of prompts in a JSON file.

    The file is expected to hold a JSON array of objects, each with a
    "prompt" string field (assumed from the access below — confirm
    against the actual data format).

    Args:
        json_file: Path to the JSON file to read (UTF-8).

    Returns:
        dict with keys: total_count, max_tokens, min_tokens,
        avg_tokens, count_over_2048, count_over_3072.
    """
    # Load the whole array into memory.
    with open(json_file, 'r', encoding='utf-8') as f:
        data = json.load(f)

    total_count = len(data)
    count_over_2048 = 0
    count_over_3072 = 0

    # Token count per prompt, in file order.
    prompt_lengths = []
    for i, item in enumerate(data, 1):
        token_count = count_tokens(item["prompt"])
        prompt_lengths.append(token_count)

        # Tally records exceeding the two length thresholds.
        if token_count > 2048:
            count_over_2048 += 1
        if token_count > 3072:
            count_over_3072 += 1

        # Progress feedback every 100 records.
        if i % 100 == 0:
            print(f"已处理 {i}/{total_count} 条记录")

    # Summary statistics; an empty input yields zeros rather than raising.
    max_tokens = max(prompt_lengths) if prompt_lengths else 0
    min_tokens = min(prompt_lengths) if prompt_lengths else 0
    avg_tokens = sum(prompt_lengths) / len(prompt_lengths) if prompt_lengths else 0

    # Guard against division by zero for an empty file — the original
    # code raised ZeroDivisionError in the percentage prints below.
    pct_over_2048 = count_over_2048 / total_count * 100 if total_count else 0.0
    pct_over_3072 = count_over_3072 / total_count * 100 if total_count else 0.0

    print("\n===== Token统计结果 =====")
    print(f"总记录数: {total_count}")
    print(f"Token最大值: {max_tokens}")
    print(f"Token最小值: {min_tokens}")
    print(f"Token平均值: {avg_tokens:.2f}")
    print(f"Token超过2048的记录数: {count_over_2048} (占比: {pct_over_2048:.2f}%)")
    print(f"Token超过3072的记录数: {count_over_3072} (占比: {pct_over_3072:.2f}%)")

    return {
        "total_count": total_count,
        "max_tokens": max_tokens,
        "min_tokens": min_tokens,
        "avg_tokens": avg_tokens,
        "count_over_2048": count_over_2048,
        "count_over_3072": count_over_3072
    }


if __name__ == "__main__":
    # Path of the JSON file to analyze — replace with your own file.
    input_path = "10-23.json"

    # Run the analysis; the returned stats dict is also printed by the call.
    stats = analyze_token_counts(input_path)
