#!/usr/bin/env python3
"""
Reranker模型Token长度分析脚本
分析法律文本数据集中query-document pair的token长度分布
"""

import json
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from transformers import AutoTokenizer
from pathlib import Path
import argparse
from typing import List, Dict, Tuple
import pandas as pd

class RerankerTokenLengthAnalyzer:
    """Analyze token-length distributions of query/document pairs.

    Scans a JSONL dataset (one record per line with ``query``,
    ``positive_doc`` and ``negative_docs`` fields), tokenizes each text
    with a HuggingFace tokenizer, and produces summary statistics plus
    distribution plots.
    """

    def __init__(self, tokenizer_name: str = "Qwen/Qwen3-Reranker-0.6B"):
        """Load the tokenizer and initialize the length accumulators.

        Args:
            tokenizer_name: HuggingFace model id (or local path) of the
                tokenizer used to count tokens.
        """
        self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
        # Token lengths collected while scanning the dataset.
        self.query_lengths: List[int] = []
        self.positive_doc_lengths: List[int] = []
        self.negative_doc_lengths: List[int] = []
        self.combined_lengths: List[int] = []  # length of "query [SEP] positive_doc"

    def analyze_dataset(self, data_path: str) -> Dict:
        """Tokenize every record in a JSONL file and return summary stats.

        The scan is best-effort: lines that fail JSON parsing or raise
        during processing are reported and skipped.

        Args:
            data_path: Path to the JSONL dataset file.

        Returns:
            Mapping from data type ('query', 'positive_document',
            'negative_document', 'combined') to its statistics dict
            (see ``_get_length_stats``).
        """
        print(f"正在分析数据集: {data_path}")

        encode = self.tokenizer.encode  # hoist the bound method out of the hot loop
        with open(data_path, 'r', encoding='utf-8') as f:
            for line_num, line in enumerate(f, 1):
                try:
                    data = json.loads(line.strip())

                    # Query token length.
                    query = data.get('query', '')
                    self.query_lengths.append(len(encode(query)))

                    # Positive document length, plus the combined
                    # query+document length — the sequence a reranker
                    # actually scores, hence the length that matters
                    # for the max-length budget.
                    if 'positive_doc' in data:
                        pos_doc = data['positive_doc']
                        self.positive_doc_lengths.append(len(encode(pos_doc)))
                        combined_text = f"{query} [SEP] {pos_doc}"
                        self.combined_lengths.append(len(encode(combined_text)))

                    # Negative document lengths (key may be absent).
                    for neg_doc in data.get('negative_docs', []):
                        self.negative_doc_lengths.append(len(encode(neg_doc)))

                    if line_num % 1000 == 0:
                        print(f"已处理 {line_num} 条数据")

                except json.JSONDecodeError:
                    print(f"第 {line_num} 行JSON解析错误，跳过")
                    continue
                except Exception as e:
                    # Best-effort: report the bad record and keep scanning.
                    print(f"第 {line_num} 行处理错误: {e}")
                    continue

        return self._calculate_statistics()

    def _calculate_statistics(self) -> Dict:
        """Build the stats mapping for every non-empty length collection."""
        stats = {}

        if self.query_lengths:
            stats['query'] = self._get_length_stats(self.query_lengths)

        if self.positive_doc_lengths:
            stats['positive_document'] = self._get_length_stats(self.positive_doc_lengths)

        if self.negative_doc_lengths:
            stats['negative_document'] = self._get_length_stats(self.negative_doc_lengths)

        if self.combined_lengths:
            stats['combined'] = self._get_length_stats(self.combined_lengths)

        return stats

    def _get_length_stats(self, lengths: List[int]) -> Dict:
        """Summarize a non-empty list of token lengths.

        All values are cast to native Python ``int``/``float``: numpy
        scalars such as ``np.int64`` are not JSON-serializable, which
        previously made ``save_statistics`` raise ``TypeError``.

        Args:
            lengths: Non-empty list of token counts.

        Returns:
            Dict with count, mean, std, min, max and key percentiles.
        """
        arr = np.asarray(lengths)
        return {
            'count': len(lengths),
            'mean': float(np.mean(arr)),
            'std': float(np.std(arr)),
            'min': int(np.min(arr)),
            'max': int(np.max(arr)),
            'percentiles': {
                '25%': float(np.percentile(arr, 25)),
                '50%': float(np.percentile(arr, 50)),
                '75%': float(np.percentile(arr, 75)),
                '90%': float(np.percentile(arr, 90)),
                '95%': float(np.percentile(arr, 95)),
                '99%': float(np.percentile(arr, 99)),
            }
        }

    def plot_distribution(self, output_path: str = "assets/figures/evalRerankerTokenLength.jpg"):
        """Draw a 2x3 grid of token-length charts and save it to disk.

        Panels: query / positive / negative histograms, the combined
        (query+doc) histogram with a 512-token reference line, a
        comparison boxplot, and the combined-length CDF annotated with
        the 512-token coverage rate.

        Args:
            output_path: Where to save the figure; parent directories
                are created as needed.
        """
        plt.style.use('seaborn-v0_8')
        fig, axes = plt.subplots(2, 3, figsize=(18, 12))
        fig.suptitle('Reranker模型Token长度分布分析', fontsize=16, fontweight='bold')

        # Query length histogram with a mean marker.
        if self.query_lengths:
            axes[0, 0].hist(self.query_lengths, bins=30, alpha=0.7, color='skyblue', edgecolor='black')
            axes[0, 0].set_title('Query Token长度分布')
            axes[0, 0].set_xlabel('Token长度')
            axes[0, 0].set_ylabel('频次')
            axes[0, 0].axvline(np.mean(self.query_lengths), color='red', linestyle='--',
                              label=f'均值: {np.mean(self.query_lengths):.1f}')
            axes[0, 0].legend()

        # Positive document length histogram.
        if self.positive_doc_lengths:
            axes[0, 1].hist(self.positive_doc_lengths, bins=30, alpha=0.7, color='lightgreen', edgecolor='black')
            axes[0, 1].set_title('Positive Document Token长度分布')
            axes[0, 1].set_xlabel('Token长度')
            axes[0, 1].set_ylabel('频次')
            axes[0, 1].axvline(np.mean(self.positive_doc_lengths), color='red', linestyle='--',
                              label=f'均值: {np.mean(self.positive_doc_lengths):.1f}')
            axes[0, 1].legend()

        # Negative document length histogram.
        if self.negative_doc_lengths:
            axes[0, 2].hist(self.negative_doc_lengths, bins=30, alpha=0.7, color='lightcoral', edgecolor='black')
            axes[0, 2].set_title('Negative Document Token长度分布')
            axes[0, 2].set_xlabel('Token长度')
            axes[0, 2].set_ylabel('频次')
            axes[0, 2].axvline(np.mean(self.negative_doc_lengths), color='red', linestyle='--',
                              label=f'均值: {np.mean(self.negative_doc_lengths):.1f}')
            axes[0, 2].legend()

        # Combined (query + doc) histogram with the 512-token budget line.
        if self.combined_lengths:
            axes[1, 0].hist(self.combined_lengths, bins=30, alpha=0.7, color='gold', edgecolor='black')
            axes[1, 0].set_title('Combined (Query+Doc) Token长度分布')
            axes[1, 0].set_xlabel('Token长度')
            axes[1, 0].set_ylabel('频次')
            axes[1, 0].axvline(np.mean(self.combined_lengths), color='red', linestyle='--',
                              label=f'均值: {np.mean(self.combined_lengths):.1f}')
            axes[1, 0].axvline(512, color='orange', linestyle='-', linewidth=2,
                              label='Max Length: 512')
            axes[1, 0].legend()

        # Side-by-side boxplot of whichever collections are non-empty.
        data_for_boxplot = []
        labels = []
        if self.query_lengths:
            data_for_boxplot.append(self.query_lengths)
            labels.append('Query')
        if self.positive_doc_lengths:
            data_for_boxplot.append(self.positive_doc_lengths)
            labels.append('Positive Doc')
        if self.negative_doc_lengths:
            data_for_boxplot.append(self.negative_doc_lengths)
            labels.append('Negative Doc')

        if data_for_boxplot:
            # NOTE(review): the `labels` kwarg was renamed `tick_labels`
            # in matplotlib 3.9; kept as-is for older versions.
            axes[1, 1].boxplot(data_for_boxplot, labels=labels)
            axes[1, 1].set_title('Token长度对比 (箱线图)')
            axes[1, 1].set_ylabel('Token长度')
            axes[1, 1].grid(True, alpha=0.3)

        # Empirical CDF of the combined length.
        if self.combined_lengths:
            sorted_combined = np.sort(self.combined_lengths)
            y = np.arange(1, len(sorted_combined) + 1) / len(sorted_combined)
            axes[1, 2].plot(sorted_combined, y, color='purple', linewidth=2)
            axes[1, 2].set_title('Combined Token长度累积分布')
            axes[1, 2].set_xlabel('Token长度')
            axes[1, 2].set_ylabel('累积概率')
            axes[1, 2].grid(True, alpha=0.3)

            # Mark the 512-token budget on the CDF.
            axes[1, 2].axvline(512, color='red', linestyle='--', linewidth=2,
                              label='Max Length: 512')

            # Fraction of pairs that fit within 512 tokens.
            coverage = np.mean(np.array(self.combined_lengths) <= 512)
            axes[1, 2].text(0.05, 0.95, f'512长度覆盖率: {coverage:.2%}',
                           transform=axes[1, 2].transAxes, fontsize=12,
                           bbox=dict(boxstyle="round,pad=0.3", facecolor="yellow", alpha=0.7))
            axes[1, 2].legend()

        plt.tight_layout()

        # Make sure the output directory exists before saving.
        Path(output_path).parent.mkdir(parents=True, exist_ok=True)
        plt.savefig(output_path, dpi=300, bbox_inches='tight')
        print(f"图表已保存到: {output_path}")
        plt.show()

    def save_statistics(self, stats: Dict, output_path: str = "evaluation/results/reranker_token_stats.json"):
        """Write the statistics dict to a UTF-8 JSON file.

        Args:
            stats: Result of ``analyze_dataset`` / ``_calculate_statistics``
                (native Python types only, so ``json.dump`` succeeds).
            output_path: Destination file; parent directories are created.
        """
        Path(output_path).parent.mkdir(parents=True, exist_ok=True)
        with open(output_path, 'w', encoding='utf-8') as f:
            json.dump(stats, f, ensure_ascii=False, indent=2)
        print(f"统计结果已保存到: {output_path}")

def main():
    """CLI entry point: analyze a dataset, print/plot/persist the stats."""
    arg_parser = argparse.ArgumentParser(description="分析Reranker模型的Token长度分布")
    arg_parser.add_argument("--data_path", type=str, required=True,
                            help="数据集文件路径")
    arg_parser.add_argument("--tokenizer", type=str, default="Qwen/Qwen3-Reranker-0.6B",
                            help="Tokenizer名称")
    arg_parser.add_argument("--output_dir", type=str, default="evaluation/results",
                            help="输出目录")
    cli_args = arg_parser.parse_args()

    # Build the analyzer and scan the dataset.
    token_analyzer = RerankerTokenLengthAnalyzer(cli_args.tokenizer)
    length_stats = token_analyzer.analyze_dataset(cli_args.data_path)

    # Human-readable summary on stdout.
    print("\n=== Token长度统计信息 ===")
    for data_type, stat in length_stats.items():
        print(f"\n{data_type.upper()}:")
        print(f"  数量: {stat['count']}")
        print(f"  均值: {stat['mean']:.2f}")
        print(f"  标准差: {stat['std']:.2f}")
        print(f"  最小值: {stat['min']}")
        print(f"  最大值: {stat['max']}")
        print("  百分位数:")
        for pct_name, pct_value in stat['percentiles'].items():
            print(f"    {pct_name}: {pct_value:.0f}")

    # Distribution figure.
    token_analyzer.plot_distribution()

    # Raw statistics as JSON.
    token_analyzer.save_statistics(length_stats, f"{cli_args.output_dir}/reranker_token_stats.json")

if __name__ == "__main__":
    main()
