import asyncio
import json
import logging
from dataclasses import dataclass, asdict
from datetime import datetime
from enum import Enum
from pathlib import Path
from typing import Dict, Any, Optional, List, Tuple

import numpy as np

from utils.logger import Logger
from utils.exceptions import ToolError
from utils.circuit_breaker import circuit_breaker
from .doc_search_engine import DocSearchEngine
from .segment_analyzer import SegmentAnalyzer

# Module-level logger.  NOTE(review): the original called get_logger(), which
# is never imported (only Logger is, at the top of the file), so importing this
# module raised NameError.  Falling back to the stdlib logger with the same
# channel name; swap in the project logger factory if one exists.
logger = logging.getLogger("doc_metadata_manager")


class DocTaskType(Enum):
    """Kinds of document tasks the manager can dispatch.

    Each value is the wire-format string embedded in task-result payloads.
    """

    SUMMARY = "summary"                          # whole-document summary
    CHAPTER_SUMMARY = "chapter_summary"          # per-chapter summary
    SECTION_RETRIEVAL = "section_retrieval"      # structured-section retrieval
    PAGE_RETRIEVAL = "page_retrieval"            # page-level retrieval
    PARAGRAPH_RETRIEVAL = "paragraph_retrieval"  # paragraph-level retrieval
    INTELLIGENT_SEGMENT = "intelligent_segment"  # intelligent segmentation
    FULL_TEXT_SEARCH = "full_text_search"        # full-text search (fallback)


@dataclass
class DocMetadata:
    """Metadata record for a single indexed document.

    Combines filesystem facts, lightweight content statistics, format-specific
    properties (title/author/... extracted from PDF or DOCX), and the indexing
    state maintained by DocMetadataManager.
    """
    file_path: str
    file_name: str
    file_type: str                       # lowercased file suffix, e.g. ".pdf"
    file_size: int                       # size in bytes
    created_time: datetime
    modified_time: datetime
    total_pages: int
    total_words: int
    total_chars: int
    language: str                        # "chinese" / "english" / "unknown"
    encoding: str
    title: str
    author: Optional[str] = None
    subject: Optional[str] = None
    keywords: Optional[List[str]] = None
    description: Optional[str] = None
    chapter_count: int = 0
    section_count: int = 0
    paragraph_count: int = 0
    indexed_segments: int = 0
    # Lifecycle: pending -> processing -> completed | failed
    indexing_status: str = "pending"
    index_error: Optional[str] = None
    # Record timestamps.  Annotated Optional (was plain ``datetime = None``,
    # which violated its own annotation); filled in __post_init__ because a
    # ``datetime.now()`` default would be evaluated once at class creation.
    created_at: Optional[datetime] = None
    updated_at: Optional[datetime] = None

    def __post_init__(self):
        if self.created_at is None:
            self.created_at = datetime.now()
        if self.updated_at is None:
            self.updated_at = datetime.now()


class DocMetadataManager:
    """Document metadata manager.

    Extracts per-document metadata, persists it as one JSON file per document
    under ``metadata_db_path``, keeps an in-memory cache keyed by file-path
    string, and routes natural-language document queries to task-specific
    handlers.  Indexing/search is delegated to ``DocSearchEngine`` and
    structural analysis to ``SegmentAnalyzer``.
    """

    # DocMetadata fields holding datetimes.  They are written to disk through
    # ``json.dump(..., default=str)`` and therefore reload as strings;
    # _load_metadata_cache revives them back into datetime objects.
    _DATETIME_FIELDS = ("created_time", "modified_time", "created_at", "updated_at")

    def __init__(self, metadata_db_path: str = "./data/doc_metadata"):
        """Create the manager and warm the cache from persisted metadata.

        Args:
            metadata_db_path: Directory holding one ``<stem>.json`` file per
                document; created if it does not exist.
        """
        self.metadata_db_path = Path(metadata_db_path)
        self.metadata_db_path.mkdir(parents=True, exist_ok=True)
        self.search_engine = DocSearchEngine()
        self.analyzer = SegmentAnalyzer()
        self.metadata_cache: Dict[str, DocMetadata] = {}  # file_path -> DocMetadata

        # Load previously persisted metadata into the in-memory cache.
        self._load_metadata_cache()

    def extract_metadata(self, file_path: str) -> DocMetadata:
        """Extract metadata for a single document.

        Combines filesystem stats, a lightweight content analysis and
        format-specific (PDF/DOCX) properties into a ``DocMetadata`` whose
        ``indexing_status`` starts at ``"pending"``.

        Raises:
            ToolError: If the file does not exist or extraction fails.
        """
        try:
            file_path = Path(file_path)
            if not file_path.exists():
                raise ToolError(f"文件不存在: {file_path}")

            # Filesystem facts.
            stat = file_path.stat()
            file_size = stat.st_size
            created_time = datetime.fromtimestamp(stat.st_ctime)
            modified_time = datetime.fromtimestamp(stat.st_mtime)

            # Lightweight content statistics (pages/words/chapters/...).
            content_info = self._analyze_content(str(file_path))

            # Format-specific properties (title/author/... for PDF/DOCX).
            specific_metadata = self._extract_specific_metadata(str(file_path))

            metadata = DocMetadata(
                file_path=str(file_path),
                file_name=file_path.name,
                file_type=file_path.suffix.lower(),
                file_size=file_size,
                created_time=created_time,
                modified_time=modified_time,
                total_pages=content_info.get("total_pages", 1),
                total_words=content_info.get("total_words", 0),
                total_chars=content_info.get("total_chars", 0),
                language=content_info.get("language", "unknown"),
                encoding=content_info.get("encoding", "utf-8"),
                title=specific_metadata.get("title", file_path.stem),
                author=specific_metadata.get("author"),
                subject=specific_metadata.get("subject"),
                keywords=specific_metadata.get("keywords"),
                description=specific_metadata.get("description"),
                chapter_count=content_info.get("chapter_count", 0),
                section_count=content_info.get("section_count", 0),
                paragraph_count=content_info.get("paragraph_count", 0),
                indexed_segments=0,
                indexing_status="pending"
            )

            logger.info(f"元数据提取完成: {file_path.name}")
            return metadata

        except Exception as e:
            logger.error(f"元数据提取失败: {file_path}, error={str(e)}")
            # Chain the original cause for debuggability.
            raise ToolError(f"元数据提取失败: {str(e)}") from e

    def identify_task_type(self, query: str, context: Optional[Dict[str, Any]] = None) -> DocTaskType:
        """Classify *query* into a DocTaskType via keyword matching.

        Summary keywords win first (refined to CHAPTER_SUMMARY when a chapter
        keyword co-occurs); otherwise chapter/page/paragraph/segment keywords
        are tried in order, then recent task history, then FULL_TEXT_SEARCH
        as the fallback (also returned on any internal error).
        """
        try:
            query_lower = query.lower()

            # Keyword groups (Chinese + English).
            summary_keywords = ["总结", "概括", "概要", "摘要", "overview", "summary"]
            chapter_keywords = ["章节", "章", "chapter", "section"]
            page_keywords = ["页", "page", "第几页"]
            paragraph_keywords = ["段落", "段", "paragraph"]

            if any(keyword in query_lower for keyword in summary_keywords):
                # "Summarize chapter X" is a chapter summary, not a global one.
                if any(keyword in query_lower for keyword in chapter_keywords):
                    return DocTaskType.CHAPTER_SUMMARY
                return DocTaskType.SUMMARY

            if any(keyword in query_lower for keyword in chapter_keywords):
                return DocTaskType.SECTION_RETRIEVAL

            if any(keyword in query_lower for keyword in page_keywords):
                return DocTaskType.PAGE_RETRIEVAL

            if any(keyword in query_lower for keyword in paragraph_keywords):
                return DocTaskType.PARAGRAPH_RETRIEVAL

            if "分段" in query_lower or "segment" in query_lower:
                return DocTaskType.INTELLIGENT_SEGMENT

            # No keyword hit: a chapter summary often follows a document
            # summary, so consult the task history before falling back.
            if context and context.get("task_history"):
                last_task = context["task_history"][-1]
                if last_task.get("task_type") == "document_summary":
                    return DocTaskType.CHAPTER_SUMMARY

            return DocTaskType.FULL_TEXT_SEARCH

        except Exception as e:
            logger.error(f"任务类型识别失败: {str(e)}")
            return DocTaskType.FULL_TEXT_SEARCH

    def process_document_task(self, file_path: str, query: str, 
                           task_type: Optional[DocTaskType] = None,
                           context: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Route a document task to its type-specific handler.

        The task type is inferred from *query*/*context* when not supplied.
        Always returns a result dict with a ``success`` flag; never raises.
        """
        try:
            if task_type is None:
                task_type = self.identify_task_type(query, context)

            logger.info(f"处理文档任务: {task_type.value}, query='{query}', file={Path(file_path).name}")

            if task_type == DocTaskType.SUMMARY:
                return self._process_summary_task(file_path, query)

            elif task_type == DocTaskType.CHAPTER_SUMMARY:
                return self._process_chapter_summary_task(file_path, query)

            elif task_type == DocTaskType.SECTION_RETRIEVAL:
                return self._process_section_retrieval_task(file_path, query)

            elif task_type == DocTaskType.PAGE_RETRIEVAL:
                return self._process_page_retrieval_task(file_path, query)

            elif task_type == DocTaskType.PARAGRAPH_RETRIEVAL:
                return self._process_paragraph_retrieval_task(file_path, query)

            elif task_type == DocTaskType.INTELLIGENT_SEGMENT:
                return self._process_intelligent_segment_task(file_path, query)

            else:  # FULL_TEXT_SEARCH
                return self._process_full_text_search_task(file_path, query)

        except Exception as e:
            logger.error(f"文档任务处理失败: {str(e)}")
            return {
                "success": False,
                "error": str(e),
                # task_type may still be None if identification itself failed.
                "task_type": task_type.value if task_type else "unknown",
                "query": query,
                "file_path": file_path
            }

    def index_document(self, file_path: str, force_reindex: bool = False) -> bool:
        """Index a document, skipping work when the stored copy is current.

        Returns True on success (or when already up to date), False on any
        failure; the failure reason is recorded on the metadata record.
        """
        try:
            file_path = Path(file_path)
            if not file_path.exists():
                logger.error(f"文件不存在: {file_path}")
                return False

            # Skip re-indexing when the stored mtime is >= the file's current
            # mtime (cached datetimes are revived in _load_metadata_cache, so
            # this is a datetime/datetime comparison).
            existing_metadata = self.get_metadata(str(file_path))
            if existing_metadata and not force_reindex:
                current_mtime = datetime.fromtimestamp(file_path.stat().st_mtime)
                if existing_metadata.modified_time >= current_mtime:
                    logger.info(f"文档已是最新，无需重新索引: {file_path.name}")
                    return True

            # Persist the "processing" transition before the slow work.
            metadata = self.extract_metadata(str(file_path))
            metadata.indexing_status = "processing"
            self._save_metadata(metadata)

            success = self.search_engine.index_document(str(file_path), asdict(metadata))

            if success:
                # Pull the segment count back from the search engine.
                search_metadata = self.search_engine.get_document_metadata(str(file_path))
                metadata.indexed_segments = search_metadata.get("total_segments", 0)
                metadata.indexing_status = "completed"
                metadata.updated_at = datetime.now()
                self._save_metadata(metadata)

                logger.info(f"文档索引成功: {file_path.name}")
                return True

            metadata.indexing_status = "failed"
            metadata.index_error = "索引失败"
            self._save_metadata(metadata)
            return False

        except Exception as e:
            logger.error(f"文档索引失败: {file_path}, error={str(e)}")

            # Best-effort: record the failure on the metadata record.  This
            # was a bare ``except: pass``; at least log what went wrong.
            try:
                metadata = self.get_metadata(str(file_path))
                if metadata:
                    metadata.indexing_status = "failed"
                    metadata.index_error = str(e)
                    self._save_metadata(metadata)
            except Exception as save_error:
                logger.warning(f"索引失败状态保存失败: {str(save_error)}")

            return False

    def get_metadata(self, file_path: str) -> Optional[DocMetadata]:
        """Return cached metadata for *file_path*, or None if unknown."""
        return self.metadata_cache.get(str(file_path))

    def search_documents(self, query: str, file_paths: Optional[List[str]] = None,
                        segment_types: Optional[List[str]] = None,
                        limit: int = 10) -> List[Dict[str, Any]]:
        """Search indexed documents and enrich each hit with cached metadata.

        Returns an empty list on any error.
        """
        try:
            # BUGFIX: file_paths was accepted but never forwarded, so the
            # caller's file filter was silently ignored.
            results = self.search_engine.search_documents(
                query=query,
                file_paths=file_paths,
                segment_types=segment_types,
                limit=limit
            )

            # The classification depends only on the query, so do it once
            # instead of once per hit.
            task_type_value = self.identify_task_type(query).value

            enhanced_results = []
            for result in results:
                metadata = self.get_metadata(result["file_path"])
                enhanced_results.append({
                    **result,
                    "file_metadata": asdict(metadata) if metadata else None,
                    "task_type": task_type_value
                })

            return enhanced_results

        except Exception as e:
            logger.error(f"文档搜索失败: {str(e)}")
            return []

    def _analyze_content(self, file_path: str) -> Dict[str, Any]:
        """Compute lightweight content statistics for a document.

        Returns a dict of page/word/char counts, a heuristic language tag and
        chapter/section/paragraph counts; falls back to zeroed defaults on
        any error.
        """
        try:
            content = self.analyzer._read_file_content(file_path)

            # Basic counts; pages are estimated at 50 lines per page.
            total_chars = len(content)
            total_words = len(content.split())
            total_lines = len(content.split('\n'))
            total_pages = (total_lines + 49) // 50

            # Crude language sniff: any non-ASCII char in the first 1000
            # chars is taken to mean Chinese.
            language = "chinese" if any(ord(c) > 127 for c in content[:1000]) else "english"

            # Structural counts from the segment analyzer.
            chapters = self.analyzer.extract_chapters(file_path)
            paragraphs = self.analyzer.extract_paragraphs(file_path)
            sections = self.analyzer.extract_sections(file_path)

            return {
                "total_pages": total_pages,
                "total_words": total_words,
                "total_chars": total_chars,
                "language": language,
                "encoding": "utf-8",
                "chapter_count": len(chapters),
                "section_count": len(sections),
                "paragraph_count": len(paragraphs)
            }

        except Exception as e:
            logger.error(f"内容分析失败: {str(e)}")
            return {
                "total_pages": 1,
                "total_words": 0,
                "total_chars": 0,
                "language": "unknown",
                "encoding": "utf-8",
                "chapter_count": 0,
                "section_count": 0,
                "paragraph_count": 0
            }

    def _extract_specific_metadata(self, file_path: str) -> Dict[str, Any]:
        """Dispatch to the format-specific metadata extractor by suffix."""
        try:
            path = Path(file_path)
            suffix = path.suffix.lower()

            if suffix == '.pdf':
                return self._extract_pdf_metadata(str(path))
            if suffix in ('.docx', '.doc'):
                return self._extract_docx_metadata(str(path))
            # Unknown formats only get a title derived from the filename.
            return {"title": path.stem}

        except Exception as e:
            logger.error(f"特定元数据提取失败: {str(e)}")
            return {"title": Path(file_path).stem}

    def _extract_pdf_metadata(self, file_path: str) -> Dict[str, Any]:
        """Extract PDF document properties via pdfplumber.

        The import is local so the dependency stays optional; any failure
        degrades to a filename-derived title.
        """
        try:
            import pdfplumber

            with pdfplumber.open(file_path) as pdf:
                metadata = pdf.metadata or {}

                return {
                    "title": metadata.get("Title", Path(file_path).stem),
                    "author": metadata.get("Author"),
                    "subject": metadata.get("Subject"),
                    # Keywords arrive as a comma-separated string, if at all.
                    "keywords": metadata.get("Keywords", "").split(",") if metadata.get("Keywords") else None,
                    "description": metadata.get("Description")
                }

        except Exception as e:
            logger.error(f"PDF元数据提取失败: {str(e)}")
            return {"title": Path(file_path).stem}

    def _extract_docx_metadata(self, file_path: str) -> Dict[str, Any]:
        """Extract DOCX core properties via python-docx.

        The import is local so the dependency stays optional; any failure
        degrades to a filename-derived title.
        """
        try:
            from docx import Document

            doc = Document(file_path)
            core_props = doc.core_properties

            return {
                "title": core_props.title or Path(file_path).stem,
                "author": core_props.author,
                "subject": core_props.subject,
                "keywords": core_props.keywords.split(",") if core_props.keywords else None,
                "description": core_props.comments
            }

        except Exception as e:
            logger.error(f"DOCX元数据提取失败: {str(e)}")
            return {"title": Path(file_path).stem}

    def _task_error(self, task_type: DocTaskType, query: str, file_path: str,
                    error: Exception) -> Dict[str, Any]:
        """Build the uniform failure payload shared by every task handler."""
        return {
            "success": False,
            "error": str(error),
            "task_type": task_type.value,
            "query": query,
            "file_path": file_path
        }

    def _process_summary_task(self, file_path: str, query: str) -> Dict[str, Any]:
        """Handle a whole-document summary task."""
        try:
            # Summarize from chapter/section segments.
            segments = self.search_engine.get_document_segments(file_path, ["chapter", "section"])

            # Placeholder summary; an LLM should eventually do this.
            summary = self._generate_summary(segments, query)

            return {
                "success": True,
                "task_type": DocTaskType.SUMMARY.value,
                "query": query,
                "file_path": file_path,
                "summary": summary,
                "segments_used": len(segments),
                "method": "chapter_based_summary"
            }

        except Exception as e:
            logger.error(f"总结任务处理失败: {str(e)}")
            return self._task_error(DocTaskType.SUMMARY, query, file_path, e)

    def _process_chapter_summary_task(self, file_path: str, query: str) -> Dict[str, Any]:
        """Handle a chapter-summary task: summarize only query-relevant chapters."""
        try:
            chapters = self.analyzer.extract_chapters(file_path)

            relevant_chapters = []
            for chapter in chapters:
                if self._is_relevant_chapter(chapter, query):
                    chapter_summary = self._generate_chapter_summary(chapter, query)
                    relevant_chapters.append({
                        "title": chapter["title"],
                        "page_num": chapter["page_num"],
                        "summary": chapter_summary,
                        "content_preview": chapter["content"][:500] + "..."
                    })

            return {
                "success": True,
                "task_type": DocTaskType.CHAPTER_SUMMARY.value,
                "query": query,
                "file_path": file_path,
                "relevant_chapters": relevant_chapters,
                "total_chapters": len(chapters),
                "relevant_count": len(relevant_chapters)
            }

        except Exception as e:
            logger.error(f"章节总结任务处理失败: {str(e)}")
            return self._task_error(DocTaskType.CHAPTER_SUMMARY, query, file_path, e)

    def _process_section_retrieval_task(self, file_path: str, query: str) -> Dict[str, Any]:
        """Handle structured-section retrieval (top 5 by similarity)."""
        try:
            segments = self.search_engine.get_document_segments(file_path, ["section", "chapter"])

            relevant_segments = self.search_engine.search_by_similarity(query, segments, top_k=5)

            return {
                "success": True,
                "task_type": DocTaskType.SECTION_RETRIEVAL.value,
                "query": query,
                "file_path": file_path,
                "relevant_segments": relevant_segments,
                "total_segments": len(segments),
                "relevant_count": len(relevant_segments)
            }

        except Exception as e:
            logger.error(f"段落检索任务处理失败: {str(e)}")
            return self._task_error(DocTaskType.SECTION_RETRIEVAL, query, file_path, e)

    def _process_page_retrieval_task(self, file_path: str, query: str) -> Dict[str, Any]:
        """Handle page-level retrieval (top 3 by similarity)."""
        try:
            segments = self.search_engine.get_document_segments(file_path, ["page"])

            relevant_segments = self.search_engine.search_by_similarity(query, segments, top_k=3)

            return {
                "success": True,
                "task_type": DocTaskType.PAGE_RETRIEVAL.value,
                "query": query,
                "file_path": file_path,
                "relevant_pages": relevant_segments,
                "total_pages": len(segments),
                "relevant_count": len(relevant_segments)
            }

        except Exception as e:
            logger.error(f"页面检索任务处理失败: {str(e)}")
            return self._task_error(DocTaskType.PAGE_RETRIEVAL, query, file_path, e)

    def _process_paragraph_retrieval_task(self, file_path: str, query: str) -> Dict[str, Any]:
        """Handle natural-paragraph retrieval (top 10 by similarity)."""
        try:
            segments = self.search_engine.get_document_segments(file_path, ["paragraph"])

            relevant_segments = self.search_engine.search_by_similarity(query, segments, top_k=10)

            return {
                "success": True,
                "task_type": DocTaskType.PARAGRAPH_RETRIEVAL.value,
                "query": query,
                "file_path": file_path,
                "relevant_paragraphs": relevant_segments,
                "total_paragraphs": len(segments),
                "relevant_count": len(relevant_segments)
            }

        except Exception as e:
            logger.error(f"段落检索任务处理失败: {str(e)}")
            return self._task_error(DocTaskType.PARAGRAPH_RETRIEVAL, query, file_path, e)

    def _process_intelligent_segment_task(self, file_path: str, query: str) -> Dict[str, Any]:
        """Handle intelligent segmentation plus similarity ranking (top 5)."""
        try:
            intelligent_segments = self.analyzer.intelligent_segment(file_path)

            relevant_segments = self.search_engine.search_by_similarity(query, intelligent_segments, top_k=5)

            return {
                "success": True,
                "task_type": DocTaskType.INTELLIGENT_SEGMENT.value,
                "query": query,
                "file_path": file_path,
                "intelligent_segments": intelligent_segments,
                "relevant_segments": relevant_segments,
                "total_segments": len(intelligent_segments),
                "relevant_count": len(relevant_segments)
            }

        except Exception as e:
            logger.error(f"智能分段任务处理失败: {str(e)}")
            return self._task_error(DocTaskType.INTELLIGENT_SEGMENT, query, file_path, e)

    def _process_full_text_search_task(self, file_path: str, query: str) -> Dict[str, Any]:
        """Handle full-text search within a single document (up to 20 hits)."""
        try:
            search_results = self.search_engine.search_documents(
                query=query,
                file_paths=[file_path],
                limit=20
            )

            return {
                "success": True,
                "task_type": DocTaskType.FULL_TEXT_SEARCH.value,
                "query": query,
                "file_path": file_path,
                "search_results": search_results,
                "result_count": len(search_results)
            }

        except Exception as e:
            logger.error(f"全文搜索任务处理失败: {str(e)}")
            return self._task_error(DocTaskType.FULL_TEXT_SEARCH, query, file_path, e)

    def _generate_summary(self, segments: List[Dict[str, Any]], query: str) -> str:
        """Build a placeholder document summary (statistics only).

        A real implementation should delegate to an LLM; for now this only
        reports segment and word counts.
        """
        total_content = " ".join([seg.get("content", "") for seg in segments])
        word_count = len(total_content.split())

        return f"基于{len(segments)}个段落生成的总结。总词数：{word_count}。查询相关：{query}"

    def _generate_chapter_summary(self, chapter: Dict[str, Any], query: str) -> str:
        """Build a placeholder chapter summary (statistics only)."""
        content = chapter.get("content", "")
        word_count = len(content.split())

        return f"章节'{chapter['title']}'的总结。页码：{chapter.get('page_num', 1)}。词数：{word_count}。"

    def _is_relevant_chapter(self, chapter: Dict[str, Any], query: str) -> bool:
        """Return True when the full query string occurs in the chapter's
        title or content (case-insensitive substring match)."""
        query_lower = query.lower()
        title_lower = chapter.get("title", "").lower()
        content_lower = chapter.get("content", "").lower()

        return query_lower in title_lower or query_lower in content_lower

    def _save_metadata(self, metadata: DocMetadata) -> None:
        """Persist *metadata* to disk and refresh the in-memory cache.

        NOTE(review): the JSON file is named after the file *stem* only, so
        two documents with the same stem in different directories overwrite
        each other's record — confirm stems are unique in this deployment.
        """
        try:
            metadata_file = self.metadata_db_path / f"{Path(metadata.file_path).stem}.json"

            # ``default=str`` stringifies the datetime fields; they are
            # revived by _load_metadata_cache on the next startup.
            with open(metadata_file, 'w', encoding='utf-8') as f:
                json.dump(asdict(metadata), f, ensure_ascii=False, indent=2, default=str)

            self.metadata_cache[metadata.file_path] = metadata

            logger.info(f"元数据保存完成: {metadata.file_name}")

        except Exception as e:
            logger.error(f"元数据保存失败: {str(e)}")

    def _load_metadata_cache(self) -> None:
        """Load every persisted metadata JSON file into the cache.

        Unreadable files are skipped with a warning so one corrupt record
        cannot block startup.
        """
        try:
            for metadata_file in self.metadata_db_path.glob("*.json"):
                try:
                    with open(metadata_file, 'r', encoding='utf-8') as f:
                        data = json.load(f)

                    # BUGFIX: datetimes were serialized via ``default=str``
                    # and previously reloaded as plain strings, which made
                    # index_document's ``modified_time >= current_mtime``
                    # comparison raise TypeError.  Revive them here.
                    for field_name in self._DATETIME_FIELDS:
                        value = data.get(field_name)
                        if isinstance(value, str):
                            try:
                                data[field_name] = datetime.fromisoformat(value)
                            except ValueError:
                                pass  # leave unparsable values untouched

                    metadata = DocMetadata(**data)
                    self.metadata_cache[metadata.file_path] = metadata
                except Exception as e:
                    logger.warning(f"元数据文件加载失败: {metadata_file}, error={str(e)}")

            logger.info(f"元数据缓存加载完成: {len(self.metadata_cache)} 个文档")

        except Exception as e:
            logger.error(f"元数据缓存加载失败: {str(e)}")

    def get_statistics(self) -> Dict[str, Any]:
        """Return aggregate counts over the cached metadata.

        Returns an empty dict on any error.
        """
        try:
            total_docs = len(self.metadata_cache)
            indexed_docs = sum(1 for m in self.metadata_cache.values() if m.indexing_status == "completed")
            failed_docs = sum(1 for m in self.metadata_cache.values() if m.indexing_status == "failed")

            total_segments = sum(m.indexed_segments for m in self.metadata_cache.values())

            # Histogram of documents per file extension.
            file_types: Dict[str, int] = {}
            for metadata in self.metadata_cache.values():
                file_type = metadata.file_type
                file_types[file_type] = file_types.get(file_type, 0) + 1

            return {
                "total_documents": total_docs,
                "indexed_documents": indexed_docs,
                "failed_documents": failed_docs,
                "total_segments": total_segments,
                "file_types": file_types,
                "cache_size": len(self.metadata_cache)
            }

        except Exception as e:
            logger.error(f"统计信息获取失败: {str(e)}")
            return {}