#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import sys
import os
sys.path.append('src')

from processors.docx_processor import DOCXProcessor
from core.chunking_engine import ChunkingEngine
from core.text_cleaner import TextCleaner
import json

def debug_structural_chunking() -> None:
    """Debug the 'split by document structure' chunking strategy.

    Reads a fixed DOCX fixture, cleans the extracted text, runs the
    structural chunking strategy, prints summary statistics (including a
    check for the pathological all-chunks-identical case), and dumps
    per-chunk details to ``debug_structural_chunking_result.json``.
    """
    print("=== 调试按文档结构分段策略 ===")

    # Hard-coded debug fixture; bail out early if it is missing.
    docx_file = 'data/面向水库风险监测的低成本低功耗边缘智能系统.docx'

    if not os.path.exists(docx_file):
        print(f"错误：文件 {docx_file} 不存在")
        return

    # Extract the raw text from the Word document.
    processor = DOCXProcessor()
    with open(docx_file, 'rb') as f:
        file_content = f.read()

    result = processor.extract_content(file_content)
    raw_text = result['text']

    print(f"原始文档长度: {len(raw_text)} 字符")

    # Cleaning config: normalize whitespace / line breaks / empty lines
    # only; keep headers, duplicates and punctuation untouched so the
    # structure detector sees the original layout.
    cleaning_config = {
        'remove_extra_whitespace': True,
        'fix_line_breaks': True,
        'remove_headers_footers': False,
        'remove_duplicates': False,
        'remove_empty_lines': True,
        'normalize_punctuation': False
    }

    cleaner = TextCleaner(cleaning_config)
    cleaned_text = cleaner.clean_text(raw_text)

    print(f"清洗后文档长度: {len(cleaned_text)} 字符")

    # Chunking strategy under test: split by document structure.
    chunking_config = {
        'strategy': '按文档结构分段',
        'max_depth': 3,
        'preserve_structure': True
    }

    engine = ChunkingEngine(chunking_config)

    # Run the chunking pass.
    print("\n开始执行按文档结构分段...")
    chunks = engine.chunk_document(cleaned_text)

    print("\n切片结果统计:")
    print(f"- 总切片数: {len(chunks)}")

    if chunks:
        lengths = [chunk['metadata']['char_count'] for chunk in chunks]
        print(f"- 平均长度: {sum(lengths) / len(lengths):.1f} 字符")
        print(f"- 最短切片: {min(lengths)} 字符")
        print(f"- 最长切片: {max(lengths)} 字符")

        # Detect the pathological case where every chunk has identical content.
        unique_contents = set(chunk['content'] for chunk in chunks)
        print(f"- 唯一内容数: {len(unique_contents)}")

        if len(unique_contents) == 1:
            print("⚠️  警告：所有切片内容都相同！")

        # Preview the first five chunks.
        print("\n前5个切片信息:")
        for i, chunk in enumerate(chunks[:5]):
            content_preview = chunk['content'][:100].replace('\n', ' ')
            print(f"切片 {i}: {chunk['metadata']['char_count']} 字符 - {content_preview}...")

        # Preview the last five chunks, labelled with their true indices.
        if len(chunks) > 5:
            print("\n最后5个切片信息:")
            for i, chunk in enumerate(chunks[-5:], len(chunks) - 5):
                content_preview = chunk['content'][:100].replace('\n', ' ')
                print(f"切片 {i}: {chunk['metadata']['char_count']} 字符 - {content_preview}...")

    # Collect per-chunk details for offline inspection.
    debug_output = {
        'config': chunking_config,
        'total_chunks': len(chunks),
        'chunk_details': [
            {
                'chunk_id': chunk['chunk_id'],
                'char_count': chunk['metadata']['char_count'],
                'content_preview': chunk['content'][:200],
                'metadata': chunk['metadata']
            }
            for chunk in chunks
        ]
    }

    # Persist the debug report as UTF-8 JSON (keep CJK readable).
    output_file = 'debug_structural_chunking_result.json'
    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(debug_output, f, ensure_ascii=False, indent=2)

    print(f"\n调试结果已保存到: {output_file}")

# Script entry point: run the structural-chunking debug session.
if __name__ == "__main__":
    debug_structural_chunking()