"""
文档总结工具类

这个模块提供了处理和总结多个文件内容的功能。
主要用于将多个文件的内容组织成特定格式，并分批调用API进行处理。
"""
from typing import List, Tuple
import os
import re
from src.utils.api_utils import ApiClient
from src.config import GlobalConfig
import json
from src.utils.runtime_state import RuntimeState

def extract_file_summaries(response: str) -> List[Tuple[str, str]]:
    """Extract (file path, summary) pairs from an API response.

    The response is expected to contain zero or more ``[###] ... [/###]``
    blocks, each holding a ``<FILE>...</FILE>`` tag and a
    ``<SUMMARY>...</SUMMARY>`` tag. Blocks missing either tag are skipped.

    Args:
        response: Raw response text returned by the API.

    Returns:
        List[Tuple[str, str]]: ``(file_path, summary)`` tuples, both
        stripped of surrounding whitespace, in block order.

    Example:
        response = '''
        [###]
        <FILE>/path/to/file.py</FILE>
        <SUMMARY>summary text</SUMMARY>
        [/###]
        '''
        extract_file_summaries(response)
        # -> [('/path/to/file.py', 'summary text')]
    """
    pairs: List[Tuple[str, str]] = []

    # re.DOTALL lets '.' span newlines inside blocks and tagged sections.
    for raw_block in re.findall(r'\[###\](.*?)\[/###\]', response, re.DOTALL):
        path_match = re.search(r'<FILE>(.*?)</FILE>', raw_block, re.DOTALL)
        text_match = re.search(r'<SUMMARY>(.*?)</SUMMARY>', raw_block, re.DOTALL)

        # Keep only blocks that contain both tags.
        if path_match is None or text_match is None:
            continue
        pairs.append((path_match.group(1).strip(), text_match.group(1).strip()))

    return pairs

def save_summary_cache_file() -> None:
    """Persist the in-memory summaries to the cache file.

    The file is opened in write mode so any previous contents are
    discarded before the current ``RuntimeState`` summaries are written
    out as JSON.
    """
    from src.utils.file_utils import FileUtils

    cache_dir = os.path.join(
        GlobalConfig.get_work_dir(), GlobalConfig.get_config('cache_dir')
    )
    summary_path = os.path.join(cache_dir, 'summary_cache.txt')
    # NOTE(review): ensure_dir_exists receives the file path — presumably it
    # creates the parent directory; confirm against FileUtils.
    FileUtils.ensure_dir_exists(summary_path)
    # 'w' truncates any existing file before writing.
    with open(summary_path, "w", encoding="utf-8") as handle:
        snapshot = RuntimeState.get('summaries')
        # ensure_ascii=False keeps non-ASCII (e.g. CJK) text readable on disk.
        json.dump(snapshot, handle, ensure_ascii=False)

def load_summary_cache_file() -> None:
    """Load cached summaries from disk into ``RuntimeState``.

    Reads the JSON cache written by ``save_summary_cache_file`` and stores
    the parsed object under the ``'summaries'`` runtime key.

    Raises:
        FileNotFoundError: If the cache file has not been created yet.
        json.JSONDecodeError: If the cache file is not valid JSON.

    Note:
        The original signature claimed a ``List[Tuple[str, str]]`` return,
        but the function never returned a value; the annotation is fixed to
        ``None`` to match actual behavior.
    """
    cache_dir = os.path.join(GlobalConfig.get_work_dir(), GlobalConfig.get_config('cache_dir'))
    summary_file = os.path.join(cache_dir, 'summary_cache.txt')
    with open(summary_file, "r", encoding="utf-8") as f:
        # json.load streams directly from the file object.
        RuntimeState.set('summaries', json.load(f))


def merge_to_runtime_state(summaries: List[Tuple[str, str]]) -> None:
    """Merge ``(file_path, summary)`` pairs into the shared summaries map.

    A later pair with the same file path overwrites an earlier entry.

    Args:
        summaries: Pairs to merge into the ``'summaries'`` runtime key.
    """
    merged = RuntimeState.get('summaries')
    # dict.update accepts an iterable of key/value pairs directly.
    merged.update(summaries)
    RuntimeState.set('summaries', merged)


def process_file(input_files_path: List[str], clear_cache_file: bool = True) -> None:
    """Read files, batch their contents, and summarize each batch via the API.

    Each file is rendered as::

        #FILE:/path/to/file.py
        content of the file

    Rendered files accumulate until adding the next one would push the
    batch past ``MAX_LENGTH`` characters; the batch is then sent to the
    API, its per-file summaries merged into ``RuntimeState``, and a new
    batch is started. All summaries are persisted at the end via
    ``save_summary_cache_file``.

    Args:
        input_files_path: Paths of the files to summarize.
        clear_cache_file: When True, the in-memory summary map is reset
            before processing so stale entries are discarded.
    """
    api_client = ApiClient()

    # Character budget for one API request's context.
    MAX_LENGTH = 320000

    current_context: List[str] = []
    current_length = 0

    def process_batch(context: str) -> None:
        """Summarize one batch of formatted file contents and merge results."""
        try:
            from src.utils.prompt_utils import generate_summary_prompt
            summary_prompt = generate_summary_prompt(context)
            response = api_client.get_completion(summary_prompt)
            summaries = extract_file_summaries(response)
            merge_to_runtime_state(summaries)
        except Exception as e:
            # Best-effort: a failed batch is reported but does not abort the run.
            print(f"处理批次时发生错误: {str(e)}")

    if clear_cache_file:
        RuntimeState.set('summaries', {})

    for file_path in input_files_path:
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                content = f.read()

            file_content = f"#FILE:{file_path}\n{content}\n"
            file_length = len(file_content)

            # BUGFIX: include the "\n" separator inserted by "\n".join when
            # this file is not the first in the batch — the original
            # undercounted and could exceed MAX_LENGTH by (#files - 1) chars.
            separator_length = 1 if current_context else 0
            if current_length + separator_length + file_length > MAX_LENGTH:
                if current_context:
                    process_batch("\n".join(current_context))
                # NOTE(review): a single file longer than MAX_LENGTH is still
                # sent as its own oversized batch, matching prior behavior.
                current_context = []
                current_length = 0

            current_context.append(file_content)
            # Count the joiner toward the running total for non-first files.
            current_length += file_length + (1 if len(current_context) > 1 else 0)

        except Exception as e:
            # Skip unreadable files rather than aborting the whole run.
            print(f"处理文件 {file_path} 时发生错误: {str(e)}")

    # Flush the final, possibly partial batch.
    if current_context:
        process_batch("\n".join(current_context))
    save_summary_cache_file()
     