"""Novel processing pipeline.

Splits an input novel file into chunks, rewrites each chunk through an
API client, persists incremental progress after every chunk, and
validates the merged output against the original text.
"""
from pathlib import Path
from .config_loader import ConfigLoader
from .api_client import APIClient
from .text_processor import TextProcessor
from .file_handler import FileManager
from utils.logger import logger

class NovelProcessor:
    """Orchestrates rewriting of a novel file.

    Loads configuration, splits the input text into chunks, rewrites each
    chunk through the API client, saves incremental output, then writes
    and validates the merged result.
    """

    def __init__(self, config_path: Path):
        # TODO(review): config_path is accepted but never handed to
        # ConfigLoader — confirm whether ConfigLoader should receive it.
        # Stored here so callers can at least inspect which path was given.
        self.config_path = config_path
        self.config = ConfigLoader().config
        self.api_client = APIClient(self.config)
        self.text_processor = TextProcessor(
            {'processing': self.config['processing']},
            self.api_client
        )
        self.file_manager = FileManager()

    def process(self, input_file: Path, output_dir: Path):
        """Process a single input file end to end.

        Reads the file, splits it into chunks, rewrites each chunk through
        the API, saves incremental output after every chunk, then writes
        and validates the merged result.

        Args:
            input_file: path of the novel file to rewrite.
            output_dir: directory receiving temp_<n>.txt snapshots and the
                final output file named by config['processing']['output_file'].

        Raises:
            ValueError: empty input, empty split, empty chunk, content loss
                during chunking, or failed validation of the final output.
        """
        processed = []
        # NOTE(review): progress.json is never actually written anywhere —
        # the error log below overstates; temp_<idx>.txt files are the real
        # progress record.
        progress_path = output_dir / "progress.json"

        try:
            content = self.file_manager.read_file(input_file)
            if not content.strip():
                raise ValueError("输入文件内容为空")

            chunks = self.text_processor.split_text(content)
            # Guard against an empty split: the average-length statistic
            # below would otherwise raise ZeroDivisionError.
            if not chunks:
                raise ValueError("分块结果为空")
            logger.info(f"分块完成 | 总块数: {len(chunks)} | 平均长度: {sum(len(c) for c in chunks)//len(chunks)}字符")
            logger.debug(f"分块详情: {[len(c) for c in chunks]}")

            # Sanity check: chunking must retain at least 80% of the text.
            total_length = sum(len(c) for c in chunks)
            if total_length < len(content) * 0.8:
                raise ValueError(f"分块后内容丢失 | 原文长度: {len(content)} | 分块总长度: {total_length}")

            for idx, chunk in enumerate(chunks, 1):
                if not chunk.strip():
                    raise ValueError(f"第{idx}块内容为空")

                logger.debug(f"处理块 {idx} | 输入长度: {len(chunk)}")
                logger.trace(f"输入内容:\n{chunk[:200]}...")  # first 200 chars

                rewritten = self.api_client.rewrite(chunk)

                logger.trace(f"输出内容:\n{rewritten[:200]}...")  # first 200 chars
                processed.append(rewritten)

                # Persist everything processed so far after every chunk, so
                # a crash loses at most one chunk of work.
                self.file_manager.write_file(
                    output_dir / f"temp_{idx}.txt",
                    "\n\n".join(processed)
                )

                logger.debug(f"处理块 {idx} | 输出长度: {len(rewritten)}")

            # Merge with blank lines between chunks for paragraph spacing.
            final_content = '\n\n'.join(processed)
            output_path = output_dir / self.config['processing']['output_file']
            self.file_manager.write_file(output_path, final_content)

            # Validate the merged result against the original text.
            self._validate_content(content, final_content, input_file)

        except Exception as e:
            # Fix: include the failure reason — previously the bound
            # exception was never logged, making interruptions hard to
            # diagnose.
            logger.error(f"处理中断: {e} | 已保存进度到 {progress_path}")
            raise

    def _validate_content(self, original: str, processed: str, file: Path):
        """Validate the rewritten text against the original.

        Raises:
            ValueError: when the result is shorter than 20% of the original,
                or when a key phrase present in the original is missing from
                the result. Large paragraph-count drift only logs a warning.
        """
        # Length floor tuned to 20% of the original.
        if len(processed) < len(original) * 0.2:
            raise ValueError(f"处理结果过短 | 原文长度: {len(original)} | 结果长度: {len(processed)}")

        # Key phrases that must survive rewriting (adjust per content).
        key_phrases = ["主角", "重要地点"]
        for phrase in key_phrases:
            if phrase in original and phrase not in processed:
                raise ValueError(f"关键内容丢失: {phrase}")

        # Paragraph-structure check: warn (do not fail) on large drift.
        original_para_count = original.count('\n\n')
        processed_para_count = processed.count('\n\n')
        if abs(original_para_count - processed_para_count) > 5:
            logger.warning(f"段落数量变化较大 | 原文段落: {original_para_count} | 结果段落: {processed_para_count}")