import asyncio
import json
import logging
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple

import numpy as np
import whoosh
from whoosh import index
from whoosh.fields import DATETIME, ID, NUMERIC, TEXT, Schema
from whoosh.qparser import MultifieldParser, QueryParser
from whoosh.query import And, Every, Or, Term

from utils.exceptions import ToolError
from utils.logger import Logger

from .segment_analyzer import SegmentAnalyzer

logger = get_logger("doc_search_engine")


class DocSearchEngine:
    """Document search engine: full-text and similarity retrieval on Whoosh.

    Each source document is split into segments (chapter / page / paragraph /
    section / intelligent) by SegmentAnalyzer, and every segment is stored as
    its own Whoosh document so queries can filter by segment type, file type,
    and position.
    """

    def __init__(self, index_dir: str = "./data/doc_index"):
        """Create or open the on-disk Whoosh index under *index_dir*.

        Args:
            index_dir: Directory holding the index; created if absent.

        Raises:
            ToolError: if the index cannot be opened or created.
        """
        self.index_dir = Path(index_dir)
        self.index_dir.mkdir(parents=True, exist_ok=True)
        self.analyzer = SegmentAnalyzer()

        # One Whoosh document per extracted segment; all fields stored so
        # search hits can be returned without re-reading the source file.
        self.schema = Schema(
            doc_id=ID(stored=True, unique=True),
            file_path=ID(stored=True),
            file_name=TEXT(stored=True),
            file_type=TEXT(stored=True),
            title=TEXT(stored=True),
            content=TEXT(stored=True),
            segment_type=TEXT(stored=True),  # chapter, page, paragraph, section, intelligent
            metadata=TEXT(stored=True),  # JSON-encoded user metadata
            page_num=NUMERIC(stored=True),
            line_start=NUMERIC(stored=True),
            line_end=NUMERIC(stored=True),
            word_count=NUMERIC(stored=True),
            char_count=NUMERIC(stored=True),
            created_at=DATETIME(stored=True),
            updated_at=DATETIME(stored=True),
        )

        self._create_index()

    def _create_index(self):
        """Open the existing index or create a fresh one; sets ``self.ix``.

        Raises:
            ToolError: wrapping any underlying failure.
        """
        try:
            if index.exists_in(str(self.index_dir)):
                self.ix = index.open_dir(str(self.index_dir))
                logger.info(f"打开现有索引: {self.index_dir}")
            else:
                self.ix = index.create_in(str(self.index_dir), self.schema)
                logger.info(f"创建新索引: {self.index_dir}")
        except Exception as e:
            logger.error(f"索引创建失败: {str(e)}")
            raise ToolError(f"索引创建失败: {str(e)}")

    @staticmethod
    def _any_of(field: str, values: List[str]):
        """Build a query matching any of *values* in *field*.

        A single value yields a plain ``Term``; multiple values are OR-ed.
        """
        terms = [Term(field, value) for value in values]
        return terms[0] if len(terms) == 1 else Or(terms)

    @staticmethod
    def _segment_fields(hit) -> Dict[str, Any]:
        """Project the stored segment fields shared by all result formats.

        *hit* is any mapping exposing the stored schema fields (a Whoosh
        Hit, or a plain dict in tests).
        """
        return {
            "doc_id": hit["doc_id"],
            "title": hit["title"],
            "content": hit["content"],
            "segment_type": hit["segment_type"],
            "page_num": hit["page_num"],
            "line_start": hit["line_start"],
            "line_end": hit["line_end"],
            "word_count": hit["word_count"],
            "char_count": hit["char_count"],
        }

    def index_document(self, file_path: str, metadata: Optional[Dict[str, Any]] = None) -> bool:
        """Extract segments from *file_path* and index each one.

        Args:
            file_path: Path to the document on disk.
            metadata: Optional extra metadata stored JSON-encoded with every
                segment.

        Returns:
            True on success, False on any failure (logged, never raised).
        """
        try:
            file_path = Path(file_path)
            if not file_path.exists():
                logger.error(f"文件不存在: {file_path}")
                return False

            # Doc id embeds mtime so a modified file gets fresh segment ids.
            # NOTE(review): segments indexed under an older mtime are NOT
            # removed here — call delete_document() first when re-indexing a
            # changed file to avoid stale entries.
            doc_id = f"{file_path.stem}_{int(file_path.stat().st_mtime)}"

            segments = self._extract_segments(str(file_path))

            writer = self.ix.writer()
            try:
                now = datetime.now()  # one timestamp for the whole batch
                for segment in segments:
                    segment_doc_id = f"{doc_id}_{segment['type']}_{segment.get('segment_num', 0)}"

                    # update_document upserts on the unique doc_id field.
                    writer.update_document(
                        doc_id=segment_doc_id,
                        file_path=str(file_path),
                        file_name=file_path.name,
                        file_type=file_path.suffix.lower(),
                        title=segment.get("title", ""),
                        content=segment.get("content", ""),
                        segment_type=segment.get("type", "unknown"),
                        metadata=json.dumps(metadata or {}),
                        page_num=segment.get("page_num", 1),
                        line_start=segment.get("start_line", 0),
                        line_end=segment.get("end_line", 0),
                        word_count=segment.get("word_count", 0),
                        char_count=segment.get("char_count", 0),
                        created_at=now,
                        updated_at=now,
                    )

                writer.commit()
                logger.info(f"文档索引完成: {file_path.name}, 段落数: {len(segments)}")
                return True

            except Exception as e:
                writer.cancel()  # release the writer lock on failure
                logger.error(f"索引写入失败: {str(e)}")
                return False

        except Exception as e:
            logger.error(f"文档索引失败: {file_path}, error={str(e)}")
            return False

    def search_documents(self, query: str,
                        segment_types: Optional[List[str]] = None,
                        file_types: Optional[List[str]] = None,
                        limit: int = 10) -> List[Dict[str, Any]]:
        """Full-text search over indexed segments.

        Args:
            query: Free text matched against title and content; an empty
                string matches every indexed segment.
            segment_types: Optional whitelist of segment types.
            file_types: Optional whitelist of file extensions (e.g. ".pdf").
            limit: Maximum number of hits returned.

        Returns:
            Hit dicts (ids, file info, content, position, relevance score)
            in relevance order; empty list on error.
        """
        try:
            with self.ix.searcher() as searcher:
                clauses = []

                if query:
                    parser = MultifieldParser(["title", "content"], self.ix.schema)
                    clauses.append(parser.parse(query))

                if segment_types:
                    clauses.append(self._any_of("segment_type", segment_types))

                if file_types:
                    clauses.append(self._any_of("file_type", file_types))

                # AND all clauses together; no clauses at all means match-all.
                if not clauses:
                    final_query = Every()
                elif len(clauses) == 1:
                    final_query = clauses[0]
                else:
                    final_query = And(clauses)

                results = searcher.search(final_query, limit=limit)

                search_results = []
                for hit in results:
                    record = {
                        "doc_id": hit["doc_id"],
                        "file_path": hit["file_path"],
                        "file_name": hit["file_name"],
                        "title": hit["title"],
                        "content": hit["content"],
                        "segment_type": hit["segment_type"],
                        "page_num": hit["page_num"],
                        "line_start": hit["line_start"],
                        "line_end": hit["line_end"],
                        "word_count": hit["word_count"],
                        "char_count": hit["char_count"],
                        "score": hit.score,
                    }
                    search_results.append(record)

                logger.info(f"搜索完成: query='{query}', 结果数: {len(search_results)}")
                return search_results

        except Exception as e:
            logger.error(f"搜索失败: {str(e)}")
            return []

    def search_by_similarity(self, query: str, segments: List[Dict[str, Any]],
                           top_k: int = 5, similarity_threshold: float = 0.7) -> List[Dict[str, Any]]:
        """Rank *segments* by semantic similarity to *query*.

        Delegates to SegmentAnalyzer.search_relevant_segments; returns at
        most *top_k* segments above *similarity_threshold*, or an empty
        list on error.
        """
        try:
            return self.analyzer.search_relevant_segments(
                query, segments, top_k, similarity_threshold
            )
        except Exception as e:
            logger.error(f"相似度搜索失败: {str(e)}")
            return []

    def get_document_segments(self, file_path: str,
                            segment_types: Optional[List[str]] = None) -> List[Dict[str, Any]]:
        """Fetch the indexed segments of one document in reading order.

        Args:
            file_path: Exact file path used at indexing time.
            segment_types: Optional whitelist of segment types.

        Returns:
            Segment dicts sorted by (page_num, line_start); empty list on
            error.
        """
        try:
            with self.ix.searcher() as searcher:
                clauses = [Term("file_path", str(file_path))]
                if segment_types:
                    clauses.append(self._any_of("segment_type", segment_types))

                # limit=1000 caps pathologically large documents.
                results = searcher.search(And(clauses), limit=1000)

                segments = [self._segment_fields(hit) for hit in results]

                # Present segments in reading order.
                segments.sort(key=lambda seg: (seg["page_num"], seg["line_start"]))

                logger.info(f"获取文档段落完成: {file_path}, 段落数: {len(segments)}")
                return segments

        except Exception as e:
            logger.error(f"获取文档段落失败: {str(e)}")
            return []

    def delete_document(self, file_path: str) -> bool:
        """Delete every indexed segment belonging to *file_path*.

        Returns:
            True on success, False on error (logged, never raised).
        """
        try:
            writer = self.ix.writer()
            try:
                writer.delete_by_term("file_path", str(file_path))
                writer.commit()
            except Exception:
                writer.cancel()  # release the writer lock before reporting
                raise

            logger.info(f"文档索引删除完成: {file_path}")
            return True

        except Exception as e:
            logger.error(f"文档索引删除失败: {str(e)}")
            return False

    def get_document_metadata(self, file_path: str) -> Dict[str, Any]:
        """Aggregate per-document statistics from its indexed segments.

        Returns:
            Dict with file info, per-type segment counts, word/char totals
            and the indexing timestamp; empty dict if the document is not
            indexed or on error.
        """
        try:
            with self.ix.searcher() as searcher:
                results = searcher.search(Term("file_path", str(file_path)), limit=1000)

                if not results:
                    return {}

                # Tally segment counts and size totals across all hits.
                segment_types: Dict[str, int] = {}
                total_words = 0
                total_chars = 0
                for hit in results:
                    seg_type = hit["segment_type"]
                    segment_types[seg_type] = segment_types.get(seg_type, 0) + 1
                    total_words += hit["word_count"]
                    total_chars += hit["char_count"]

                # File-level fields are identical across segments; read the
                # first hit.
                first = results[0]
                metadata = {
                    "file_path": str(file_path),
                    "file_name": first["file_name"],
                    "file_type": first["file_type"],
                    "total_segments": len(results),
                    "segment_types": segment_types,
                    "total_words": total_words,
                    "total_chars": total_chars,
                    "indexed_at": first["created_at"],
                }

                logger.info(f"获取文档元数据完成: {file_path}")
                return metadata

        except Exception as e:
            logger.error(f"获取文档元数据失败: {str(e)}")
            return {}

    def _extract_segments(self, file_path: str) -> List[Dict[str, Any]]:
        """Run every SegmentAnalyzer strategy and pool the results.

        The same text is indexed at several granularities so every
        segment_type is searchable. Returns an empty list on error.
        """
        segments: List[Dict[str, Any]] = []

        try:
            extractors = (
                self.analyzer.extract_chapters,     # chapter-level segments
                self.analyzer.extract_pages,        # page-level segments
                self.analyzer.extract_paragraphs,   # natural paragraphs
                self.analyzer.extract_sections,     # structured sections
                self.analyzer.intelligent_segment,  # heuristic segmentation
            )
            for extract in extractors:
                segments.extend(extract(file_path))

            logger.info(f"段落提取完成: {Path(file_path).name}, 总数: {len(segments)}")
            return segments

        except Exception as e:
            logger.error(f"段落提取失败: {file_path}, error={str(e)}")
            return []

    def optimize_index(self) -> bool:
        """Merge all index segments into one for faster searching.

        The original called ``writer.commit(merge=True)``, but merge=True is
        Whoosh's default commit behavior and performs no special work;
        ``Index.optimize()`` is the documented full-merge operation.

        Returns:
            True on success, False on error (logged).
        """
        try:
            self.ix.optimize()

            logger.info("索引优化完成")
            return True

        except Exception as e:
            logger.error(f"索引优化失败: {str(e)}")
            return False