#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
详细分析每个切片策略的问题
"""

import sys
import os
import json
from datetime import datetime
from collections import Counter

# 添加项目路径
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

from src.core.chunking_engine import ChunkingEngine
from src.core.document_processor import DocumentProcessor

# Default sample document kept for backward compatibility with the original
# hard-coded path.
_DEFAULT_DOC_PATH = "e:\\trae\\dify\\data\\面向水库风险监测的低成本低功耗边缘智能系统.docx"


def analyze_strategy_issues(doc_path=None, output_dir="output"):
    """Run each configured chunking strategy over a document and report issues.

    For every strategy the document is chunked, then the chunks are checked
    for duplicates, empty contents and abnormally short contents.  A summary
    is printed to stdout and the full results are written to a timestamped
    JSON file under ``output_dir``.

    Args:
        doc_path: Path of the document to analyze.  Defaults to the original
            hard-coded sample document, so existing no-argument callers keep
            working unchanged.
        output_dir: Directory the JSON report is written to (created if
            missing).

    Returns:
        Dict mapping strategy name to its stats dict (or an error record),
        or ``None`` when the document could not be processed.
    """
    if doc_path is None:
        doc_path = _DEFAULT_DOC_PATH

    processor = DocumentProcessor()
    chunking_engine = ChunkingEngine()

    print("正在处理文档...")
    result = processor.process_file(doc_path)
    # `success` defaults to True because some processor versions omit the key.
    if not result.get('success', True):
        print(f"文档处理失败: {result.get('error', '未知错误')}")
        return

    raw_content = result.get('raw_content', '')
    if not raw_content:
        print("文档内容为空")
        return

    print(f"文档长度: {len(raw_content)} 字符")

    # Strategies under test; each entry carries the engine config verbatim.
    strategies = [
        {
            "name": "按文档结构分段",
            "config": {
                "strategy": "按文档结构分段",
                "max_depth": 3,
                "preserve_structure": True,
                "include_headers": True
            }
        },
        {
            "name": "题目结构分段",
            "config": {
                "strategy": "题目结构分段",
                "question_pattern": "^\\d+\\."
            }
        }
    ]

    analysis_results = {}

    for strategy_info in strategies:
        strategy_name = strategy_info["name"]
        config = strategy_info["config"]

        print(f"\n=== 分析策略: {strategy_name} ===")

        try:
            chunks = chunking_engine.chunk_text(raw_content, config)

            if not chunks:
                print(f"策略 {strategy_name} 没有生成任何切片")
                continue

            print(f"生成切片数量: {len(chunks)}")

            chunk_contents = [chunk.get('content', '') for chunk in chunks]
            analysis_results[strategy_name] = _analyze_and_report(chunk_contents)

        except Exception as e:
            # A failing strategy must not abort the remaining strategies;
            # record the error so it shows up in the JSON report.
            print(f"策略 {strategy_name} 执行失败: {str(e)}")
            analysis_results[strategy_name] = {
                "error": str(e),
                "success": False
            }

    _save_report(analysis_results, doc_path, output_dir)

    return analysis_results


def _analyze_and_report(chunk_contents):
    """Compute quality stats for chunk texts, print a summary, return the stats dict."""
    chunk_lengths = [len(content) for content in chunk_contents]
    total = len(chunk_contents)

    # Contents that occur more than once across chunks.
    content_counter = Counter(chunk_contents)
    duplicates = {content: count for content, count in content_counter.items() if count > 1}

    # Indices of empty chunks and of suspiciously short ones (< 10 chars after strip).
    empty_chunks = [i for i, content in enumerate(chunk_contents) if not content.strip()]
    very_short_chunks = [i for i, content in enumerate(chunk_contents) if len(content.strip()) < 10]

    stats = {
        "total_chunks": total,
        "avg_length": sum(chunk_lengths) / len(chunk_lengths) if chunk_lengths else 0,
        "min_length": min(chunk_lengths) if chunk_lengths else 0,
        "max_length": max(chunk_lengths) if chunk_lengths else 0,
        "duplicate_count": len(duplicates),
        # NOTE: kept from the original — this is the count of *distinct*
        # duplicated contents over the total chunk count, not the share of
        # chunks that are duplicates.
        "duplicate_rate": len(duplicates) / total * 100 if total else 0,
        "empty_chunks": len(empty_chunks),
        "very_short_chunks": len(very_short_chunks),
        "duplicates": list(duplicates.keys())[:3],  # keep only the first 3 duplicated contents
        "sample_chunks": chunk_contents[:5],  # first 5 chunk contents
        "problematic_chunks": {
            "empty_indices": empty_chunks[:10],
            "very_short_indices": very_short_chunks[:10]
        }
    }

    print(f"平均长度: {stats['avg_length']:.2f}")
    print(f"最短长度: {stats['min_length']}")
    print(f"最长长度: {stats['max_length']}")
    print(f"重复切片数: {stats['duplicate_count']}")
    print(f"重复率: {stats['duplicate_rate']:.2f}%")
    print(f"空切片数: {stats['empty_chunks']}")
    print(f"异常短切片数: {stats['very_short_chunks']}")

    # Show the first few chunks so the reader can eyeball boundary quality.
    print("\n前3个切片内容:")
    for i, content in enumerate(chunk_contents[:3]):
        print(f"切片 {i}: [{len(content)} 字符] {repr(content[:100])}...")

    if duplicates:
        print("\n重复内容示例:")
        for content, count in list(duplicates.items())[:3]:
            print(f"重复 {count} 次: {repr(content[:50])}...")

    return stats


def _save_report(analysis_results, doc_path, output_dir):
    """Write the analysis results to a timestamped JSON file under *output_dir*."""
    os.makedirs(output_dir, exist_ok=True)

    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    output_file = os.path.join(output_dir, f"strategy_analysis_{timestamp}.json")

    with open(output_file, 'w', encoding='utf-8') as f:
        # ensure_ascii=False keeps Chinese text readable in the report.
        json.dump({
            "analysis_time": datetime.now().isoformat(),
            "document_path": doc_path,
            "strategies": analysis_results
        }, f, ensure_ascii=False, indent=2)

    print(f"\n分析结果已保存到: {output_file}")

# Allow running this analysis directly as a script.
if __name__ == "__main__":
    analyze_strategy_issues()