from typing import Dict, Any, Optional, List
from datetime import datetime
import json
import uuid
from pathlib import Path
import pdfplumber
from docx import Document
import whoosh
from whoosh import index
from whoosh.fields import Schema, TEXT, KEYWORD, DATETIME, NUMERIC
from whoosh.qparser import QueryParser, MultifieldParser
from whoosh.query import Every
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

from utils.logger import Logger
from utils.exceptions import ToolError

# NOTE(review): the original read `logger = get_logger("document_manager")`,
# but `get_logger` is never imported anywhere in this file, which raises
# NameError the moment the module is imported. `Logger` is the name actually
# imported from utils.logger, so it is used here — confirm against the
# utils.logger API (the intended fix may instead be importing get_logger).
logger = Logger("document_manager")


class DocumentManager:
    """文档管理器：负责文档的加载、元数据提取和索引构建"""

    def __init__(self, index_dir: str = "/tmp/whoosh_index"):
        """Set up the manager: TF-IDF vectorizer, index directory, Whoosh index.

        Args:
            index_dir: Directory holding the Whoosh index files; created
                (including parents) when it does not exist yet.
        """
        # Vectorizer kept around for TF-IDF based similarity scoring.
        self.vectorizer = TfidfVectorizer(max_features=1000, stop_words='english')
        self.index_dir = Path(index_dir)
        self.index_dir.mkdir(parents=True, exist_ok=True)
        self._init_index()

    def _init_index(self):
        """Open the Whoosh index under ``self.index_dir``, creating it if absent.

        Sets ``self.ix`` to the opened/created index.
        """
        fields = dict(
            segment_id=KEYWORD(stored=True),
            doc_id=KEYWORD(stored=True),
            content=TEXT(stored=True),
            title=TEXT(stored=True),
            chapter=KEYWORD(stored=True),
            page_num=NUMERIC(stored=True),
            segment_type=KEYWORD(stored=True),
            timestamp=DATETIME(stored=True),
            metadata=TEXT(stored=True),
        )
        schema = Schema(**fields)

        # Reuse an existing on-disk index when present; otherwise build a new one.
        if index.exists_in(self.index_dir):
            self.ix = index.open_dir(self.index_dir)
        else:
            self.ix = index.create_in(self.index_dir, schema)

        logger.info(f"Whoosh索引初始化完成: {self.index_dir}")

    def detect_doc_type(self, file_path: str) -> str:
        """Classify a document by its file extension.

        Args:
            file_path: Path to the document (only the suffix is inspected).

        Returns:
            One of ``'pdf'``, ``'docx'`` or ``'txt'``; unknown extensions
            fall back to ``'txt'``.
        """
        extension = Path(file_path).suffix.lower()
        if extension == '.pdf':
            return 'pdf'
        if extension in ('.docx', '.doc'):
            return 'docx'
        # '.txt', '.md' and anything unrecognized are treated as plain text.
        return 'txt'

    def extract_pdf_metadata(self, file_path: str) -> Dict[str, Any]:
        """Read document-level metadata and detected chapters from a PDF.

        Args:
            file_path: Path to the PDF file.

        Returns:
            Dict with page count, core PDF metadata fields, a ``chapters``
            list and ``chapter_count``; a minimal zeroed fallback dict when
            extraction fails for any reason.
        """
        try:
            with pdfplumber.open(file_path) as pdf:
                info = pdf.metadata
                result: Dict[str, Any] = {
                    "page_count": len(pdf.pages),
                    "title": info.get("Title", ""),
                    "author": info.get("Author", ""),
                    "subject": info.get("Subject", ""),
                    "creator": info.get("Creator", ""),
                    "producer": info.get("Producer", ""),
                    "creation_date": str(info.get("CreationDate", "")),
                    "mod_date": str(info.get("ModDate", "")),
                }

                # Chapter headings are detected purely from extracted page text.
                result["chapters"] = self._extract_pdf_chapters(pdf)
                result["chapter_count"] = len(result["chapters"])

                return result
        except Exception as e:
            logger.error(f"PDF元数据提取失败: {file_path}, error={str(e)}")
            return {"page_count": 0, "title": "", "author": "", "chapters": []}

    def extract_docx_metadata(self, file_path: str) -> Dict[str, Any]:
        """Read core properties and detected chapters from a DOCX file.

        Args:
            file_path: Path to the .docx file.

        Returns:
            Dict with paragraph count, core document properties, a
            ``chapters`` list and ``chapter_count``; a minimal zeroed
            fallback dict when extraction fails.
        """
        try:
            doc = Document(file_path)
            props = doc.core_properties

            result: Dict[str, Any] = {
                "paragraph_count": len(doc.paragraphs),
                "title": props.title or "",
                "author": props.author or "",
                "subject": props.subject or "",
                "created": str(props.created) if props.created else "",
                "modified": str(props.modified) if props.modified else "",
            }

            # Chapter headings are detected from paragraph text.
            result["chapters"] = self._extract_docx_chapters(doc)
            result["chapter_count"] = len(result["chapters"])

            return result
        except Exception as e:
            logger.error(f"DOCX元数据提取失败: {file_path}, error={str(e)}")
            return {"paragraph_count": 0, "title": "", "author": "", "chapters": []}

    def extract_txt_metadata(self, file_path: str) -> Dict[str, Any]:
        """Collect size statistics and detected chapters for a UTF-8 text file.

        Args:
            file_path: Path to the text file (assumed UTF-8 encoded).

        Returns:
            Dict with line/char/word counts, encoding, a ``chapters`` list
            and ``chapter_count``; a zeroed fallback dict on any read failure
            (e.g. missing file or undecodable bytes).
        """
        try:
            with open(file_path, 'r', encoding='utf-8') as handle:
                text = handle.read()

            rows = text.split('\n')
            chapters = self._extract_txt_chapters(rows)
            return {
                "line_count": len(rows),
                "char_count": len(text),
                "word_count": len(text.split()),
                "encoding": "utf-8",
                "chapters": chapters,
                "chapter_count": len(chapters),
            }
        except Exception as e:
            logger.error(f"TXT元数据提取失败: {file_path}, error={str(e)}")
            return {"line_count": 0, "char_count": 0, "word_count": 0, "chapters": []}

    def _extract_pdf_chapters(self, pdf) -> List[Dict[str, Any]]:
        """Scan every page of an open pdfplumber document for chapter headings.

        Args:
            pdf: An open pdfplumber PDF (anything with ``.pages`` whose items
                expose ``extract_text()``).

        Returns:
            List of dicts with ``title``, 1-based ``page_num``, 0-based
            ``line_num`` and heading ``level``; partial results on error.
        """
        found: List[Dict[str, Any]] = []
        try:
            for page_no, page in enumerate(pdf.pages, 1):
                text = page.extract_text()
                if not text:
                    continue
                # Heading detection is purely text based (see _is_chapter_title).
                for row_no, raw in enumerate(text.split('\n')):
                    candidate = raw.strip()
                    if not self._is_chapter_title(candidate):
                        continue
                    found.append({
                        "title": candidate,
                        "page_num": page_no,
                        "line_num": row_no,
                        "level": self._get_title_level(candidate),
                    })
        except Exception as e:
            logger.warning(f"PDF章节提取失败: {str(e)}")

        return found

    def _extract_docx_chapters(self, doc) -> List[Dict[str, Any]]:
        """Collect chapter headings from a python-docx Document.

        Args:
            doc: Object exposing ``.paragraphs`` whose items have ``.text``
                and ``.style.name``.

        Returns:
            List of dicts with ``title``, 0-based ``paragraph_num``, the
            paragraph's style name and heading ``level``.
        """
        found: List[Dict[str, Any]] = []
        try:
            for position, para in enumerate(doc.paragraphs):
                heading = para.text.strip()
                if not heading or not self._is_chapter_title(heading):
                    continue
                found.append({
                    "title": heading,
                    "paragraph_num": position,
                    "style": para.style.name,
                    "level": self._get_title_level(heading),
                })
        except Exception as e:
            logger.warning(f"DOCX章节提取失败: {str(e)}")

        return found

    def _extract_txt_chapters(self, lines: List[str]) -> List[Dict[str, Any]]:
        """Collect chapter headings from plain text given as individual lines.

        Args:
            lines: The file content already split into lines.

        Returns:
            List of dicts with ``title``, 0-based ``line_num`` and heading
            ``level``.
        """
        found: List[Dict[str, Any]] = []
        try:
            stripped_rows = ((idx, raw.strip()) for idx, raw in enumerate(lines))
            for idx, candidate in stripped_rows:
                if not self._is_chapter_title(candidate):
                    continue
                found.append({
                    "title": candidate,
                    "line_num": idx,
                    "level": self._get_title_level(candidate),
                })
        except Exception as e:
            logger.warning(f"TXT章节提取失败: {str(e)}")

        return found

    def _is_chapter_title(self, text: str) -> bool:
        """Heuristically decide whether a line looks like a chapter title.

        Args:
            text: A single (normally already stripped) line of text.

        Returns:
            True when the line is non-empty, at most 100 characters long and
            matches one of the known heading formats.
        """
        import re

        # Empty lines and overly long lines are never headings.
        if not text or len(text) > 100:
            return False

        heading_patterns = (
            r'^第[一二三四五六七八九十]+章',  # Chinese-numbered chapter
            r'^第\d+章',  # digit-numbered chapter
            r'^\d+\.\s+',  # numeric enumeration ("1. ...")
            r'^[A-Z][a-zA-Z\s]{1,50}$',  # capitalized English title
            r'^\s*[\*\#\=\-]{3,}\s*$',  # Markdown-style marker runs
        )
        return any(re.match(pattern, text) for pattern in heading_patterns)

    def _get_title_level(self, text: str) -> int:
        """Map a heading string to an outline level (1 = highest).

        Args:
            text: The heading text (normally already stripped).

        Returns:
            1 for chapter headings ("第...章"); 2 for section headings
            ("第...节"), Chinese enumerations ("一、" through "十、") and
            numeric enumerations ("1.2 ..."); 3 otherwise (including empty
            input).
        """
        if not text:
            return 3

        if text.startswith('第') and '章' in text:
            return 1  # chapter-level heading
        if text.startswith('第') and '节' in text:
            return 2  # section-level heading
        # Fix: the original only recognized 一、二、三、 even though
        # _is_chapter_title accepts Chinese numerals through ten; cover the
        # full enumeration set so levels stay consistent.
        if text.startswith(('一、', '二、', '三、', '四、', '五、',
                            '六、', '七、', '八、', '九、', '十、')):
            return 2
        if text[0].isdigit() and '.' in text:
            return 2
        return 3  # default level

    def build_segment_index(self, doc_id: str, segments: List[Dict[str, Any]]) -> bool:
        """Add one document's segments to the Whoosh index.

        Args:
            doc_id: Identifier shared by all segments of the document.
            segments: Segment dicts; recognized keys are ``segment_id``,
                ``content``, ``title``, ``chapter``, ``page_num``, ``type``
                and ``metadata``. Missing keys get defaults (a fresh UUID
                for ``segment_id``).

        Returns:
            True when all segments were committed, False on any failure.
        """
        writer = None
        try:
            writer = self.ix.writer()
            for segment in segments:
                writer.add_document(
                    segment_id=segment.get("segment_id", str(uuid.uuid4())),
                    doc_id=doc_id,
                    content=segment.get("content", ""),
                    title=segment.get("title", ""),
                    chapter=segment.get("chapter", ""),
                    page_num=segment.get("page_num", 0),
                    segment_type=segment.get("type", "paragraph"),
                    timestamp=datetime.utcnow(),
                    metadata=json.dumps(segment.get("metadata", {}))
                )
            writer.commit()
            logger.info(f"段落索引构建完成: doc_id={doc_id}, segments={len(segments)}")
            return True

        except Exception as e:
            # Fix: the original leaked the uncommitted writer on failure,
            # leaving the index write lock held and blocking later writes.
            # cancel() discards pending changes and releases the lock.
            if writer is not None:
                try:
                    writer.cancel()
                except Exception:
                    pass  # best effort; original error is the one worth logging
            logger.error(f"段落索引构建失败: {str(e)}")
            return False

    def search_from_index(self, query: str, top_k: int = 5,
                         metadata_filter: Optional[Dict[str, Any]] = None) -> List[Dict[str, Any]]:
        """Full-text search over indexed segments.

        Args:
            query: User query, parsed against the "content" and "title" fields.
            top_k: Maximum number of hits to return.
            metadata_filter: Optional exact-match constraints on stored fields
                (e.g. ``{"doc_id": "..."}``); each value is matched as a term
                and all constraints must hold.

        Returns:
            List of hit dicts (stored segment fields plus ``score`` and
            ``similarity``), best match first; empty list on failure.
        """
        try:
            with self.ix.searcher() as searcher:
                query_parser = MultifieldParser(["content", "title"], self.ix.schema)
                query_obj = query_parser.parse(query)

                # Fix: metadata_filter was accepted but silently ignored.
                # Build an AND of term queries and pass it as a search filter.
                # (whoosh.query is importable here because L12 already loads
                # the submodule, same as get_document_info's usage.)
                filter_query = None
                if metadata_filter:
                    terms = [whoosh.query.Term(field, str(value))
                             for field, value in metadata_filter.items()]
                    filter_query = whoosh.query.And(terms)

                results = searcher.search(query_obj, limit=top_k, filter=filter_query)

                segments = []
                for result in results:
                    segments.append({
                        "segment_id": result["segment_id"],
                        "content": result["content"],
                        "title": result["title"],
                        "chapter": result["chapter"],
                        "page_num": result["page_num"],
                        "score": result.score,
                        "similarity": result.score / 10.0  # crude score normalization
                    })

                logger.info(f"索引搜索完成: query='{query}', results={len(segments)}")
                return segments

        except Exception as e:
            logger.error(f"索引搜索失败: {str(e)}")
            return []

    def get_document_info(self, doc_id: str) -> Dict[str, Any]:
        """Summarize an indexed document.

        Args:
            doc_id: Identifier the document's segments were indexed under.

        Returns:
            Dict with ``doc_id``, the title of one stored segment, the total
            ``segment_count`` and the index timestamp; empty dict when the
            document is unknown or on failure.
        """
        try:
            with self.ix.searcher() as searcher:
                query = whoosh.query.Term("doc_id", doc_id)
                # Fix: the original counted segments with a second search()
                # using Whoosh's default limit (10), silently capping
                # segment_count. One unlimited search gives the true total.
                results = searcher.search(query, limit=None)

                if results:
                    first = results[0]
                    return {
                        "doc_id": doc_id,
                        "title": first.get("title", ""),
                        "segment_count": len(results),
                        "indexed_at": str(first.get("timestamp", ""))
                    }

                return {}

        except Exception as e:
            logger.error(f"获取文档信息失败: {str(e)}")
            return {}