"""
辅助工具函数

提供各种实用的辅助功能
"""

import logging
import re
from collections import Counter
from typing import Any, Dict, List, Optional

from langchain_core.documents import Document

logger = logging.getLogger(__name__)

def format_docs(docs: List[Document]) -> str:
    """Render a list of documents as one human-readable string.

    Each document becomes a numbered section labelled with its source
    (taken from ``metadata['source']``, defaulting to ``'Unknown'``),
    followed by its whitespace-stripped page content.

    Args:
        docs: Documents to render.

    Returns:
        The concatenated sections separated by blank lines, or an empty
        string when ``docs`` is empty.
    """
    if not docs:
        return ""

    sections = [
        f"文档 {index} (来源: {doc.metadata.get('source', 'Unknown')}):\n"
        f"{doc.page_content.strip()}\n"
        for index, doc in enumerate(docs, start=1)
    ]
    return "\n".join(sections)

def clean_text(text: str) -> str:
    """Normalize raw text for downstream processing.

    Collapses every run of whitespace (spaces, tabs, newlines) into a
    single space, strips characters outside an allowed set, and trims
    leading/trailing whitespace.

    Args:
        text: Raw input text; may be empty or None-like.

    Returns:
        The cleaned text, or an empty string for falsy input.
    """
    if not text:
        return ""

    # Collapse all whitespace runs (including newlines) into single spaces.
    text = re.sub(r'\s+', ' ', text)

    # Keep only word characters, whitespace, CJK ideographs, and common
    # punctuation/brackets/quotes; drop everything else.
    text = re.sub(r'[^\w\s\u4e00-\u9fff.,!?;:()[\]{}"\'-]', '', text)

    # NOTE: the original code also collapsed r'\n+' here, but that step was
    # dead code -- the first substitution already replaced every newline
    # with a space, so no newline could survive to that point.
    return text.strip()

def extract_keywords(text: str, max_keywords: int = 10) -> List[str]:
    """Extract the most frequent keywords from text.

    A simple frequency-based extractor: lowercases the text, tokenizes on
    word boundaries, drops English stop words and tokens of length <= 2,
    then ranks the remainder by occurrence count.

    Args:
        text: Input text to analyze.
        max_keywords: Maximum number of keywords to return.

    Returns:
        Up to ``max_keywords`` tokens, most frequent first (ties keep
        first-seen order).
    """
    words = re.findall(r'\b\w+\b', text.lower())

    # Common English stop words to exclude from keyword candidates.
    # (The original set literal listed 'her' twice; a set dedupes anyway.)
    stop_words = {
        'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for',
        'of', 'with', 'by', 'is', 'are', 'was', 'were', 'be', 'been', 'being',
        'have', 'has', 'had', 'do', 'does', 'did', 'will', 'would', 'could',
        'should', 'may', 'might', 'must', 'can', 'this', 'that', 'these',
        'those', 'i', 'you', 'he', 'she', 'it', 'we', 'they', 'me', 'him',
        'her', 'us', 'them', 'my', 'your', 'his', 'its', 'our', 'their'
    }

    # Keep only meaningful tokens: longer than two chars and not stop words.
    candidates = [w for w in words if len(w) > 2 and w not in stop_words]

    # Counter.most_common sorts by count descending with stable (insertion)
    # order for ties -- identical to the original sorted()-based ranking.
    return [word for word, _ in Counter(candidates).most_common(max_keywords)]

def summarize_text(text: str, max_sentences: int = 3) -> str:
    """Produce a naive extractive summary of ``text``.

    Splits on sentence-ending punctuation (ASCII and CJK), then keeps the
    first ``max_sentences // 2`` sentences and enough trailing sentences to
    reach ``max_sentences`` total. Short inputs are returned untouched.

    Args:
        text: Input text to summarize.
        max_sentences: Maximum number of sentences to keep.

    Returns:
        The summary joined with '。' (note: always the CJK full stop,
        regardless of the input's punctuation), or the original text when
        it is empty, shorter than 100 characters, or already within the
        sentence budget.
    """
    if not text or len(text) < 100:
        return text

    pieces = [p.strip() for p in re.split(r'[.!?。！？]', text) if p.strip()]

    if len(pieces) <= max_sentences:
        return text

    head_count = max_sentences // 2
    tail_count = max_sentences - head_count

    selected = pieces[:head_count]
    if tail_count > 0:
        selected = selected + pieces[-tail_count:]

    return '。'.join(selected) + '。'

def calculate_similarity(text1: str, text2: str) -> float:
    """Compute the Jaccard similarity of two texts.

    Tokenizes both texts into lowercase word sets and returns
    |A ∩ B| / |A ∪ B|.

    Args:
        text1: First text.
        text2: Second text.

    Returns:
        A score in [0, 1]; 0.0 when either input is empty or neither
        contains any word tokens.
    """
    if not text1 or not text2:
        return 0.0

    vocab_a = set(re.findall(r'\b\w+\b', text1.lower()))
    vocab_b = set(re.findall(r'\b\w+\b', text2.lower()))

    union_size = len(vocab_a | vocab_b)
    if union_size == 0:
        return 0.0

    return len(vocab_a & vocab_b) / union_size

def split_into_paragraphs(text: str, min_length: int = 50) -> List[str]:
    """Split text into paragraphs on blank lines, merging short ones.

    Paragraphs are separated by ``'\\n\\n'``. Paragraphs shorter than
    ``min_length`` are accumulated and flushed as one combined paragraph
    when the next long paragraph appears, or at end of input.

    Args:
        text: Input text.
        min_length: Threshold below which a paragraph is merged with its
            short neighbors rather than emitted on its own.

    Returns:
        The list of paragraphs; empty for empty input.
    """
    if not text:
        return []

    result: List[str] = []
    pending = ""  # accumulator for consecutive short paragraphs

    for raw in text.split('\n\n'):
        para = raw.strip()
        if not para:
            continue

        if len(para) >= min_length:
            # Flush any accumulated short paragraphs before the long one.
            if pending:
                result.append(pending)
                pending = ""
            result.append(para)
        else:
            pending = f"{pending}\n\n{para}" if pending else para

    # Flush the trailing accumulation unconditionally. The original only
    # kept it when len(pending) >= min_length, silently discarding trailing
    # short text -- inconsistent with the mid-stream flush above, which
    # emits the accumulator regardless of its length.
    if pending:
        result.append(pending)

    return result

def merge_documents(docs: List[Document], separator: str = "\n\n") -> Document:
    """Merge several documents into a single Document.

    Page contents are joined with ``separator``. Metadata is combined:
    per-document keys (other than source/file_type/chunk_index) are merged
    with later documents overwriting earlier ones, while sources and file
    types are collected into aggregate fields.

    Args:
        docs: Documents to merge.
        separator: String placed between consecutive page contents.

    Returns:
        A new Document (or the single input document unchanged when the
        list has exactly one element; an empty Document for an empty list).
    """
    if not docs:
        return Document(page_content="", metadata={})

    if len(docs) == 1:
        return docs[0]

    merged_text = separator.join(d.page_content for d in docs)

    metadata: Dict[str, Any] = {}
    sources: List[str] = []
    type_set = set()
    skip_keys = {'source', 'file_type', 'chunk_index'}

    for d in docs:
        if d.metadata.get('source'):
            sources.append(d.metadata['source'])
        if d.metadata.get('file_type'):
            type_set.add(d.metadata['file_type'])
        # Later documents overwrite earlier ones for shared keys.
        for key, value in d.metadata.items():
            if key not in skip_keys:
                metadata[key] = value

    # Aggregate fields describing the merge itself.
    metadata['sources'] = sources
    metadata['file_types'] = list(type_set)
    metadata['merged_from'] = len(docs)
    metadata['total_length'] = len(merged_text)

    return Document(page_content=merged_text, metadata=metadata)

def validate_document(doc: Document) -> bool:
    """Check that ``doc`` is a usable Document.

    Valid means: it is a ``Document`` instance, has non-empty
    ``page_content``, and has a ``metadata`` attribute that is a dict.

    Args:
        doc: Object to validate.

    Returns:
        True when all checks pass, False otherwise.
    """
    return (
        isinstance(doc, Document)
        and bool(getattr(doc, 'page_content', None))
        and isinstance(getattr(doc, 'metadata', None), dict)
    )

def filter_documents_by_length(docs: List[Document],
                             min_length: int = 10,
                             max_length: Optional[int] = None) -> List[Document]:
    """Filter documents by page-content length.

    Invalid documents (per :func:`validate_document`) are dropped, as are
    documents shorter than ``min_length`` or, when ``max_length`` is given,
    longer than ``max_length``.

    Args:
        docs: Documents to filter.
        min_length: Minimum content length (inclusive).
        max_length: Maximum content length (inclusive), or None for no
            upper bound.

    Returns:
        The surviving documents, in their original order.
    """
    filtered_docs = []

    for doc in docs:
        if not validate_document(doc):
            continue

        content_length = len(doc.page_content)

        if content_length < min_length:
            continue

        # Explicit None check: the original tested truthiness, which would
        # silently disable the upper bound for max_length == 0.
        if max_length is not None and content_length > max_length:
            continue

        filtered_docs.append(doc)

    logger.info(f"文档长度过滤: {len(docs)} -> {len(filtered_docs)} 个文档")
    return filtered_docs

def deduplicate_documents(docs: List[Document],
                         similarity_threshold: float = 0.9) -> List[Document]:
    """Remove near-duplicate documents.

    Greedy O(n^2) pass: each document is kept only if its Jaccard
    similarity (via :func:`calculate_similarity`) to every previously kept
    document is below ``similarity_threshold``. The first document is
    always kept.

    Args:
        docs: Documents to deduplicate.
        similarity_threshold: Similarity at or above which a document is
            considered a duplicate.

    Returns:
        The unique documents, in their original order.
    """
    if not docs:
        return []

    unique_docs = [docs[0]]

    for candidate in docs[1:]:
        is_duplicate = any(
            calculate_similarity(candidate.page_content, kept.page_content)
            >= similarity_threshold
            for kept in unique_docs
        )
        if not is_duplicate:
            unique_docs.append(candidate)

    logger.info(f"文档去重: {len(docs)} -> {len(unique_docs)} 个文档")
    return unique_docs