import os
import numpy as np
from transformers import BertTokenizerFast
from tqdm import tqdm

def analyze_lengths(txt_path):
    """Tokenize every dialogue in *txt_path* and print token-length statistics.

    The file is a UTF-8 text corpus where dialogues are separated by blank
    lines and each line within a dialogue is one utterance.  Every dialogue
    is encoded as ``[CLS] utt1 [SEP] utt2 [SEP] ...`` and the total token
    count per dialogue is collected.

    Args:
        txt_path: Path to the UTF-8 dialogue corpus.

    Returns:
        numpy.ndarray of per-dialogue token lengths (statistics are also
        printed to stdout).
    """
    # Build the tokenizer from the project's local vocab file, resolved
    # relative to this script so the CWD doesn't matter.
    current_dir = os.path.dirname(os.path.abspath(__file__))
    tokenizer = BertTokenizerFast(os.path.join(current_dir, '../vocab/vocab.txt'),
                                  sep_token="[SEP]",
                                  pad_token="[PAD]",
                                  cls_token="[CLS]")

    sep_id = tokenizer.sep_token_id
    cls_id = tokenizer.cls_token_id

    # Read the corpus.
    with open(txt_path, 'r', encoding='utf-8') as f:
        data = f.read()

    # Normalize Windows line endings (\r\n) to Unix (\n) once, instead of
    # branching on the newline style for the whole file and again per
    # dialogue.  (Note: Windows uses \r\n and Linux uses \n — the original
    # comment had these reversed.)
    data = data.replace("\r\n", "\n")

    # Dialogues are separated by blank lines.  Drop empty blocks (e.g. from a
    # trailing newline) so they are not counted as bogus length-2 samples
    # consisting of just [CLS][SEP].
    dialogues = [d for d in data.split("\n\n") if d.strip()]

    print(f"总对话数: {len(dialogues)}")

    # Collect the token length of each dialogue.
    lengths = []
    for dialogue in tqdm(dialogues):
        sequences = dialogue.strip().split("\n")

        # Layout mirrors training-time input: [CLS] utt1 [SEP] utt2 [SEP] ...
        input_ids = [cls_id]
        for sequence in sequences:
            input_ids += tokenizer.encode(sequence, add_special_tokens=False)
            input_ids.append(sep_id)
        lengths.append(len(input_ids))

    lengths = np.array(lengths)

    # Guard: .min()/.max() raise on an empty array if the corpus had no
    # non-blank dialogues.
    if lengths.size == 0:
        print("No dialogues found; nothing to analyze.")
        return lengths

    print("\n=== Token 长度统计 ===")
    print(f"样本总数: {len(lengths)}")
    print(f"最小长度: {lengths.min()}")
    print(f"最大长度: {lengths.max()}")
    print(f"平均长度: {lengths.mean():.2f}")
    print(f"中位数长度: {np.median(lengths)}")
    print(f"90分位长度: {np.percentile(lengths, 90)}")
    print(f"95分位长度: {np.percentile(lengths, 95)}")
    print(f"99分位长度: {np.percentile(lengths, 99)}")

    return lengths

if __name__ == '__main__':
    # Resolve the dataset path relative to this script's directory so the
    # analysis works no matter where it is launched from.
    # relative_path = "../data/medical_train.txt"
    relative_path = "../data/chat_train.pkl"
    script_dir = os.path.dirname(os.path.abspath(__file__))

    analyze_lengths(os.path.join(script_dir, relative_path))

# Recorded output for "../data/medical_train.txt"
# === Token 长度统计 ===        
# 样本总数: 30177
# 最小长度: 2
# 最大长度: 301
# 平均长度: 113.45
# 中位数长度: 72.0
# 90分位长度: 250.40000000000146
# 95分位长度: 272.0
# 99分位长度: 293.0


# Recorded output for "../data/chat_train.pkl"
# === Token 长度统计 ===
# 样本总数: 500001
# 最小长度: 2
# 最大长度: 1855
# 平均长度: 47.15
# 中位数长度: 36.0
# 90分位长度: 84.0
# 95分位长度: 112.0
# 99分位长度: 203.0