import asyncio
from typing import List, Dict, Any, Optional
from dataclasses import dataclass
import re
import json
from datetime import datetime

@dataclass
class DocumentChunk:
    """A single chunk of a larger document, with relevance bookkeeping."""
    content: str  # the chunk's text (stripped of surrounding whitespace)
    source: str  # identifier of the originating document
    relevance_score: float = 0.0  # keyword-match score in [0, 1]; set by organize_by_relevance
    # Positional info (start_index/end_index/chunk_index) set by chunk_text;
    # None when the chunk was built without it (e.g. summary partials).
    metadata: Optional[Dict[str, Any]] = None

class DocumentProcessor:
    """Utilities for chunking, scoring, and summarizing documents.

    Splits raw text into overlapping chunks (preferring sentence
    boundaries), scores chunks against keyword lists, and builds simple
    extractive summaries, key-point lists, metadata, and outlines.
    """

    def __init__(self, max_chunk_size: int = 1000, overlap_size: int = 100):
        # max_chunk_size: upper bound (in characters) for a single chunk.
        # overlap_size: trailing characters repeated at the start of the
        # next chunk so context is preserved across chunk boundaries.
        self.max_chunk_size = max_chunk_size
        self.overlap_size = overlap_size

    def chunk_text(self, text: str, source: str = "unknown") -> List[DocumentChunk]:
        """Split text into overlapping chunks, cutting at sentence breaks.

        Args:
            text: Input text to split.
            source: Label recorded on every produced chunk.

        Returns:
            List of DocumentChunk objects covering the whole text, in order.
        """
        if not text:
            return []

        chunks: List[DocumentChunk] = []
        text_length = len(text)
        start = 0

        while start < text_length:
            end = start + self.max_chunk_size

            if end >= text_length:
                # Final chunk: clamp so the recorded end_index never
                # exceeds the real text length (the original stored
                # start + max_chunk_size even past the end).
                end = text_length
            else:
                # Prefer to cut at a sentence or line boundary inside
                # the current window.
                last_period = text.rfind('。', start, end)
                last_newline = text.rfind('\n', start, end)
                if last_period > start:
                    end = last_period + 1
                elif last_newline > start:
                    end = last_newline + 1

            chunks.append(DocumentChunk(
                content=text[start:end].strip(),
                source=source,
                metadata={
                    "start_index": start,
                    "end_index": end,
                    "chunk_index": len(chunks),
                },
            ))

            if end >= text_length:
                # BUG FIX: the original kept looping after the final
                # chunk and emitted a duplicate overlapping tail chunk.
                break

            # Step back by the overlap, but always make forward progress:
            # the original could loop forever when a sentence-bounded
            # chunk was shorter than overlap_size.
            start = max(end - self.overlap_size, start + 1)

        return chunks

    def extract_key_points(self, text: str, max_points: int = 5) -> List[str]:
        """Extract up to max_points salient sentences from text.

        A sentence qualifies if it is 21-199 characters long and either
        contains a digit or one of a fixed list of emphasis/conclusion
        markers (Chinese).

        Args:
            text: Input text.
            max_points: Maximum number of key points to return.

        Returns:
            List of qualifying sentences, in document order.
        """
        if not text:
            return []

        # Split on both Chinese and Latin sentence terminators.
        sentences = re.split(r'[。！？.!?]', text)
        key_points = []

        for sentence in sentences:
            sentence = sentence.strip()
            if 20 < len(sentence) < 200:
                # Keep sentences containing numbers, or emphasis /
                # summary vocabulary (e.g. "key", "important",
                # "research shows", "conclusion").
                if any(keyword in sentence for keyword in [
                    '关键', '重要', '主要', '核心', '基础', '显著', '研究表明',
                    '数据显示', '结论', '结果显示', '研究发现'
                ]) or re.search(r'\d+', sentence):
                    key_points.append(sentence)

        return key_points[:max_points]

    def calculate_relevance_score(self, text: str, keywords: List[str]) -> float:
        """Score text against keywords by case-insensitive containment.

        Args:
            text: Text content to score.
            keywords: Keywords to look for.

        Returns:
            Fraction of keywords found in the text, in [0, 1].
        """
        if not text or not keywords:
            return 0.0

        text_lower = text.lower()
        matched_keywords = sum(
            1 for keyword in keywords if keyword.lower() in text_lower
        )

        return matched_keywords / len(keywords)

    def organize_by_relevance(
        self,
        chunks: List[DocumentChunk],
        keywords: List[str]
    ) -> List[DocumentChunk]:
        """Score each chunk in place and return them sorted by relevance.

        Args:
            chunks: Chunks to score (their relevance_score is mutated).
            keywords: Keywords used for scoring.

        Returns:
            The chunks sorted by descending relevance_score.
        """
        for chunk in chunks:
            chunk.relevance_score = self.calculate_relevance_score(
                chunk.content, keywords
            )

        return sorted(chunks, key=lambda x: x.relevance_score, reverse=True)

    def create_summary(
        self,
        chunks: List[DocumentChunk],
        keywords: List[str],
        max_length: int = 500
    ) -> str:
        """Build an extractive summary from the most relevant chunks.

        Greedily concatenates the highest-scoring chunks (score > 0)
        until max_length characters are reached; a final chunk may be
        truncated with "..." if at least 50 characters of space remain.

        Args:
            chunks: Candidate chunks.
            keywords: Keywords used for relevance ranking.
            max_length: Maximum summary length in characters.

        Returns:
            The summary text (empty string for no chunks).
        """
        if not chunks:
            return ""

        # Rank by relevance; chunks with zero score never contribute.
        relevant_chunks = self.organize_by_relevance(chunks, keywords)

        selected_chunks = []
        current_length = 0

        for chunk in relevant_chunks:
            if chunk.relevance_score > 0:
                chunk_length = len(chunk.content)
                if current_length + chunk_length <= max_length:
                    selected_chunks.append(chunk)
                    current_length += chunk_length
                else:
                    # Partially include the next chunk only if a
                    # meaningful amount of space (> 50 chars) is left.
                    remaining_space = max_length - current_length
                    if remaining_space > 50:
                        partial_content = chunk.content[:remaining_space] + "..."
                        selected_chunks.append(DocumentChunk(
                            content=partial_content,
                            source=chunk.source,
                            relevance_score=chunk.relevance_score
                        ))
                    break

        summary = " ".join(chunk.content for chunk in selected_chunks)
        return summary.strip()

    def extract_metadata(self, text: str, source: str) -> Dict[str, Any]:
        """Extract lightweight metadata from a document.

        Args:
            text: Document text.
            source: Document origin identifier.

        Returns:
            Dict with source, word_count, char_count, created_at
            (ISO timestamp), plus dates_found (first matching date
            format only) and numbers_found when present.
        """
        metadata = {
            "source": source,
            # BUG FIX: word_count was len(text), identical to char_count;
            # count whitespace-separated tokens instead.
            "word_count": len(text.split()),
            "char_count": len(text),
            "created_at": datetime.now().isoformat()
        }

        # Look for dates in common Chinese / ISO / US formats; only the
        # first pattern that matches anything is recorded.
        date_patterns = [
            r'\d{4}年\d{1,2}月\d{1,2}日',
            r'\d{4}-\d{1,2}-\d{1,2}',
            r'\d{1,2}/\d{1,2}/\d{4}'
        ]

        for pattern in date_patterns:
            matches = re.findall(pattern, text)
            if matches:
                metadata["dates_found"] = matches
                break

        # Count integer/decimal numbers appearing in the text.
        numbers = re.findall(r'\d+(?:\.\d+)?', text)
        if numbers:
            metadata["numbers_found"] = len(numbers)

        return metadata

    def generate_outline(self, chunks: List[DocumentChunk]) -> List[str]:
        """Generate a rough outline from section-like sentences.

        A sentence contributes when one of a fixed set of section
        keywords ("introduction", "method", "results", "conclusion",
        etc., in Chinese) appears within its first 20 characters.

        Args:
            chunks: Document chunks to scan.

        Returns:
            Up to 10 unique themes (first 50 chars of each sentence),
            in first-seen order.
        """
        if not chunks:
            return []

        full_text = " ".join(chunk.content for chunk in chunks)

        sentences = re.split(r'[。！？.!?]', full_text)
        themes = []

        for sentence in sentences:
            sentence = sentence.strip()
            if len(sentence) > 10:
                # Sentences opening with section-heading vocabulary are
                # treated as themes.
                if any(keyword in sentence[:20] for keyword in [
                    '介绍', '概述', '背景', '方法', '结果', '讨论',
                    '结论', '展望', '应用', '影响', '发展'
                ]):
                    themes.append(sentence[:50])

        # BUG FIX: list(set(...)) had nondeterministic ordering; use an
        # order-preserving dedup so repeated calls agree.
        return list(dict.fromkeys(themes))[:10]