#!/usr/bin/env python3

import json
from transformers import AutoTokenizer
import os

def analyze_data_length():
    """Analyze the token-length distribution of the training data.

    Loads the Qwen tokenizer (preferring a local checkout, falling back to
    the hub), tokenizes each prompt+response pair from the JSONL training
    file, prints per-sample token counts, then summary statistics and how
    many samples would be truncated at several candidate max_length values.

    Side effects: reads from disk (and possibly the network for the
    tokenizer); all results are printed to stdout. Returns None.
    """
    # Load the tokenizer, using the local model directory when available
    # to avoid a network download.
    model_path = "models/Qwen1.5-1.8B"
    if os.path.exists(model_path):
        tokenizer = AutoTokenizer.from_pretrained(model_path, local_files_only=True)
    else:
        print(f"未找到本地模型 {model_path}，使用在线分词器")
        tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-1.8B")

    # Read the training data: one JSON object per line (JSONL).
    data_file = "training/data/tianqi_jiyuan_training_data.jsonl"
    lengths = []

    print("分析训练数据长度分布...")
    with open(data_file, 'r', encoding='utf-8') as f:
        for i, line in enumerate(f):
            line = line.strip()
            # Skip blank lines (e.g. a trailing newline) instead of
            # crashing in json.loads("").
            if not line:
                continue
            data = json.loads(line)
            prompt = data.get('prompt', '')
            response = data.get('response', '')

            # Concatenate prompt and response to mirror the text the model
            # sees at training time, then count its tokens.
            full_text = prompt + response
            token_count = len(tokenizer.encode(full_text))
            lengths.append(token_count)

            print(f"样本 {i+1}: {token_count} tokens")
            print(f"  Prompt: {prompt[:50]}...")
            print(f"  Response: {response[:50]}...")
            print()

    # Summary statistics over all samples (skipped if the file was empty).
    if lengths:
        avg_length = sum(lengths) / len(lengths)
        max_length = max(lengths)
        min_length = min(lengths)

        print(f"统计信息:")
        print(f"  平均长度: {avg_length:.1f} tokens")
        print(f"  最大长度: {max_length} tokens")
        print(f"  最小长度: {min_length} tokens")
        print(f"  样本总数: {len(lengths)}")

        # Truncation impact: for each candidate max_length, count how many
        # samples exceed it and would therefore be cut off during training.
        print(f"\n截断影响分析:")
        for max_len in [128, 256, 512]:
            truncated_count = sum(1 for l in lengths if l > max_len)
            truncation_rate = (truncated_count / len(lengths)) * 100
            # BUG FIX: the label previously hard-coded "max_length=3,914"
            # instead of interpolating the loop variable, so all three
            # report lines carried the same wrong threshold.
            print(f"  max_length={max_len}: {truncated_count}/{len(lengths)} ({truncation_rate:.1f}%) 样本会被截断")

if __name__ == "__main__":
    analyze_data_length()
