"""
搜索引擎管理器
集成MCP爬虫实时搜索和Whoosh全文检索
"""

import asyncio
import hashlib
import json
import logging
import os
import re
import time
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional, Set
from urllib.parse import urljoin, urlparse

import aiohttp

# Whoosh相关导入
try:
    from whoosh import index
    from whoosh.fields import Schema, TEXT, KEYWORD, DATETIME, NUMERIC
    from whoosh.qparser import QueryParser, MultifieldParser
    from whoosh.query import Every, Term, And, Or, Not
    from whoosh.analysis import StemmingAnalyzer
    WHOOSH_AVAILABLE = True
except ImportError:
    WHOOSH_AVAILABLE = False
    logging.warning("Whoosh库未安装，全文检索功能将不可用")

logger = logging.getLogger(__name__)


@dataclass
class SearchDocument:
    """A single indexable/searchable document.

    Produced by the MCP crawler or added manually through
    SearchEngineManager; stored in and returned from the Whoosh index.
    """
    id: str                  # stable identifier (MD5 hex digest of the URL when crawled)
    title: str               # display title (falls back to the URL when unavailable)
    content: str             # extracted plain-text body (crawler caps HTML content at 10k chars)
    url: str                 # source URL of the document
    doc_type: str = "text"   # one of "text", "html", "json" in this module
    tags: List[str] = field(default_factory=list)       # free-form labels, used for search filtering
    metadata: Dict[str, Any] = field(default_factory=dict)  # extra info; JSON-serialized when indexed
    created_at: datetime = field(default_factory=datetime.now)
    updated_at: datetime = field(default_factory=datetime.now)
    score: float = 0.0       # relevance score, populated only on search results


@dataclass
class SearchResult:
    """Result of a single search query."""
    documents: List[SearchDocument]  # matching documents, with .score populated
    total_count: int                 # number of documents returned (not the full index hit count)
    query_time: float                # wall-clock query duration in seconds
    has_more: bool = False           # pagination flags; not populated by WhooshSearchEngine.search
    next_offset: int = 0


class MCPCrawler:
    """MCP crawler.

    Fetches pages over HTTP(S) and converts HTML, JSON and plain-text
    responses into SearchDocument objects.  Performs a simplified
    robots.txt check, deduplicates visited URLs and throttles requests.

    Must be used as an async context manager so the aiohttp session is
    opened and closed properly::

        async with MCPCrawler(config) as crawler:
            doc = await crawler.crawl_url(url)
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Initialize crawler state from an optional config dict.

        Recognized config keys: ``max_depth``, ``max_pages``, ``delay``
        (seconds slept after every request), ``timeout`` (total request
        timeout in seconds) and ``user_agent``.
        """
        self.config = config or {}
        self.session: Optional[aiohttp.ClientSession] = None
        self.visited_urls: Set[str] = set()
        self.max_depth = self.config.get('max_depth', 3)
        self.max_pages = self.config.get('max_pages', 100)
        self.delay = self.config.get('delay', 1.0)
        self.timeout = self.config.get('timeout', 30)
        self.user_agent = self.config.get('user_agent', 'MAESS-Bot/1.0')
        # Per-site robots.txt verdicts, cached for one hour (see _check_robots_txt).
        self.robots_txt_cache: Dict[str, Dict[str, Any]] = {}

        logger.info(f"MCP爬虫初始化完成，最大深度: {self.max_depth}, 最大页面: {self.max_pages}")

    async def __aenter__(self):
        """Open the shared aiohttp session (async context manager entry)."""
        connector = aiohttp.TCPConnector(limit=20, limit_per_host=5)
        timeout = aiohttp.ClientTimeout(total=self.timeout)

        self.session = aiohttp.ClientSession(
            connector=connector,
            timeout=timeout,
            headers={'User-Agent': self.user_agent}
        )
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Close the aiohttp session (async context manager exit)."""
        if self.session:
            await self.session.close()

    async def crawl_url(self, url: str, depth: int = 0) -> Optional[SearchDocument]:
        """Crawl a single URL and return it as a SearchDocument.

        Args:
            url: URL to fetch.
            depth: current crawl depth (0 for a root URL).

        Returns:
            A SearchDocument, or None when the URL is skipped (depth
            exceeded, already visited, robots.txt denial, non-200 status,
            unsupported content type) or fetching fails.

        Raises:
            RuntimeError: if called outside the async context manager.
        """
        if depth > self.max_depth or url in self.visited_urls:
            return None

        if self.session is None:
            # Fail fast with a clear message instead of an AttributeError
            # on self.session.get() below.
            raise RuntimeError("MCPCrawler must be used as an async context manager")

        self.visited_urls.add(url)

        try:
            logger.debug(f"开始爬取: {url}, 深度: {depth}")

            # Respect robots.txt before issuing the real request.
            if not await self._check_robots_txt(url):
                logger.warning(f"Robots.txt禁止爬取: {url}")
                return None

            async with self.session.get(url) as response:
                if response.status != 200:
                    logger.warning(f"HTTP错误 {response.status}: {url}")
                    return None

                content_type = response.headers.get('content-type', '').lower()

                # Dispatch on content type; anything else is skipped.
                if 'text/html' in content_type:
                    html_content = await response.text()
                    return await self._parse_html(url, html_content, depth)

                elif 'application/json' in content_type:
                    json_content = await response.json()
                    return self._parse_json(url, json_content)

                elif 'text/plain' in content_type:
                    text_content = await response.text()
                    return self._parse_text(url, text_content)

                else:
                    logger.warning(f"不支持的Content-Type: {content_type}, URL: {url}")
                    return None

        except Exception as e:
            logger.error(f"爬取失败: {url}, 错误: {str(e)}")
            return None

        finally:
            # Throttle: always pause after a request, even on failure.
            await asyncio.sleep(self.delay)

    async def _parse_html(self, url: str, html_content: str, depth: int) -> SearchDocument:
        """Parse an HTML page into a SearchDocument.

        On parse failure a degraded document (raw HTML prefix, tagged
        'parse_error') is returned instead of raising.
        """
        try:
            from bs4 import BeautifulSoup

            soup = BeautifulSoup(html_content, 'html.parser')

            # Page title, falling back to the URL when absent or empty.
            title = soup.title.string if soup.title else url
            title = title.strip() if title else url

            # Drop script/style so get_text() only sees visible content.
            for script in soup(["script", "style"]):
                script.decompose()

            text_content = soup.get_text(separator=' ', strip=True)

            # Collect outgoing links for potential follow-up crawling.
            # BUGFIX: initialize `links` unconditionally so the metadata
            # below never depends on whether the branch ran (the original
            # used a fragile `'links' in locals()` check).
            links = []
            if depth < self.max_depth and len(self.visited_urls) < self.max_pages:
                links = soup.find_all('a', href=True)
                for link in links:
                    href = link['href']
                    full_url = urljoin(url, href)

                    # Only consider http(s) URLs that were not visited yet.
                    if self._is_valid_url(full_url) and full_url not in self.visited_urls:
                        # TODO: schedule async crawling of child links here.
                        pass

            doc_id = self._generate_doc_id(url)

            return SearchDocument(
                id=doc_id,
                title=title,
                content=text_content[:10000],  # cap stored content length
                url=url,
                doc_type="html",
                tags=["web", "html"],
                metadata={
                    'content_type': 'text/html',
                    'content_length': len(html_content),
                    'crawl_depth': depth,
                    'links_found': len(links)
                }
            )

        except Exception as e:
            logger.error(f"HTML解析失败: {url}, 错误: {str(e)}")
            # Degraded fallback document carrying the raw HTML prefix.
            return SearchDocument(
                id=self._generate_doc_id(url),
                title=url,
                content=html_content[:5000],
                url=url,
                doc_type="html",
                tags=["web", "html", "parse_error"],
                metadata={'parse_error': str(e)}
            )

    def _parse_json(self, url: str, json_content: Dict[str, Any]) -> SearchDocument:
        """Parse a JSON payload into a SearchDocument.

        On failure a degraded document (stringified JSON prefix, tagged
        'parse_error') is returned instead of raising.
        """
        try:
            # Flatten the JSON structure into searchable text.
            text_content = self._extract_text_from_json(json_content)

            # Prefer common title-ish keys; fall back to the URL.
            title = json_content.get('title', '') or json_content.get('name', '') or url

            return SearchDocument(
                id=self._generate_doc_id(url),
                title=title,
                content=text_content,
                url=url,
                doc_type="json",
                tags=["api", "json"],
                metadata={
                    'content_type': 'application/json',
                    'json_keys': list(json_content.keys())[:10]  # record only the first 10 keys
                }
            )

        except Exception as e:
            logger.error(f"JSON解析失败: {url}, 错误: {str(e)}")
            return SearchDocument(
                id=self._generate_doc_id(url),
                title=url,
                content=str(json_content)[:5000],
                url=url,
                doc_type="json",
                tags=["api", "json", "parse_error"],
                metadata={'parse_error': str(e)}
            )

    def _parse_text(self, url: str, text_content: str) -> SearchDocument:
        """Parse a plain-text response; the first line doubles as the title."""
        lines = text_content.strip().split('\n')
        title = lines[0] if lines else url

        return SearchDocument(
            id=self._generate_doc_id(url),
            title=title,
            content=text_content,
            url=url,
            doc_type="text",
            tags=["text"],
            metadata={
                'content_type': 'text/plain',
                'line_count': len(lines)
            }
        )

    def _extract_text_from_json(self, data: Any, max_depth: int = 3) -> str:
        """Recursively flatten JSON data into a space-joined text string.

        Recursion is bounded by `max_depth`; lists contribute at most
        their first 10 elements.
        """
        if max_depth <= 0:
            return str(data)

        if isinstance(data, dict):
            texts = []
            for key, value in data.items():
                if isinstance(value, (str, int, float)):
                    texts.append(f"{key}: {value}")
                elif isinstance(value, (dict, list)):
                    texts.append(f"{key}: {self._extract_text_from_json(value, max_depth - 1)}")
            return ' '.join(texts)

        elif isinstance(data, list):
            texts = []
            for item in data[:10]:  # only the first 10 elements
                texts.append(self._extract_text_from_json(item, max_depth - 1))
            return ' '.join(texts)

        else:
            return str(data)

    def _is_valid_url(self, url: str) -> bool:
        """Return True for absolute http(s) URLs with a host part."""
        try:
            parsed = urlparse(url)
            return bool(parsed.netloc and parsed.scheme in ['http', 'https'])
        except Exception:
            return False

    def _generate_doc_id(self, url: str) -> str:
        """Derive a stable document id: the MD5 hex digest of the URL."""
        return hashlib.md5(url.encode()).hexdigest()

    async def _check_robots_txt(self, url: str) -> bool:
        """Return True when robots.txt allows crawling `url`.

        Verdicts are cached per robots.txt URL for one hour.  A missing
        robots.txt or any error defaults to allowing the crawl.
        """
        try:
            parsed_url = urlparse(url)
            robots_url = f"{parsed_url.scheme}://{parsed_url.netloc}/robots.txt"

            # Serve a cached verdict if it is less than an hour old.
            if robots_url in self.robots_txt_cache:
                cache_entry = self.robots_txt_cache[robots_url]
                if datetime.now() - cache_entry['timestamp'] < timedelta(hours=1):
                    return cache_entry['allowed']

            async with self.session.get(robots_url) as response:
                if response.status == 200:
                    robots_content = await response.text()
                    allowed = self._parse_robots_txt(robots_content, parsed_url.path)
                else:
                    allowed = True  # no robots.txt: allow by default

            self.robots_txt_cache[robots_url] = {
                'allowed': allowed,
                'timestamp': datetime.now()
            }

            return allowed

        except Exception as e:
            logger.warning(f"检查robots.txt失败: {url}, 错误: {str(e)}")
            return True  # allow on error (best effort)

    def _parse_robots_txt(self, content: str, path: str) -> bool:
        """Minimal robots.txt evaluation for this crawler's user agent.

        Returns False when `path` is prefixed by any Disallow rule in a
        group addressing '*' or a user agent containing 'maess'.
        NOTE: simplified — Allow rules, wildcards and longest-match
        precedence are not implemented.
        """
        lines = content.lower().split('\n')
        # BUGFIX: the content is lowercased above, so lowercase the path
        # too; otherwise mixed-case paths never matched Disallow rules.
        path = path.lower()
        user_agent_applies = False
        disallowed_paths = []

        for line in lines:
            line = line.strip()
            if line.startswith('user-agent:'):
                user_agent = line.split(':', 1)[1].strip()
                user_agent_applies = user_agent == '*' or 'maess' in user_agent
            elif line.startswith('disallow:') and user_agent_applies:
                disallow_path = line.split(':', 1)[1].strip()
                if disallow_path:
                    disallowed_paths.append(disallow_path)

        # Prefix match against every collected Disallow rule.
        for disallow_path in disallowed_paths:
            if path.startswith(disallow_path):
                return False

        return True


class WhooshSearchEngine:
    """Full-text search engine backed by an on-disk Whoosh index.

    Every public method degrades gracefully (logs a warning and returns
    an empty/falsy value) when the whoosh package is not installed.
    """

    def __init__(self, index_dir: str = "whoosh_index"):
        """Create or open the index stored under `index_dir`.

        The directory is created if missing.
        """
        self.index_dir = index_dir
        self.index = None
        self.writer = None  # kept for interface compatibility; writers are created per operation
        # BUGFIX: only build the schema when whoosh imported successfully.
        # _create_schema references Schema/StemmingAnalyzer, which do not
        # exist when the import at module top failed — the original raised
        # NameError here and defeated the WHOOSH_AVAILABLE guard.
        self.schema = self._create_schema() if WHOOSH_AVAILABLE else None

        os.makedirs(index_dir, exist_ok=True)

        if WHOOSH_AVAILABLE:
            self._init_index()

        logger.info(f"Whoosh搜索引擎初始化完成，索引目录: {index_dir}")

    def _create_schema(self):
        """Build the index schema mirroring SearchDocument's fields."""
        return Schema(
            id=KEYWORD(stored=True, unique=True),
            title=TEXT(stored=True, analyzer=StemmingAnalyzer()),
            content=TEXT(stored=True, analyzer=StemmingAnalyzer()),
            url=KEYWORD(stored=True),
            doc_type=KEYWORD(stored=True),
            tags=KEYWORD(stored=True),       # comma-joined tag list
            created_at=DATETIME(stored=True),
            updated_at=DATETIME(stored=True),
            metadata=TEXT(stored=True)       # metadata dict stored as a JSON string
        )

    def _init_index(self):
        """Open the existing index, or create it; on corruption, wipe and recreate."""
        try:
            if index.exists_in(self.index_dir):
                self.index = index.open_dir(self.index_dir)
            else:
                self.index = index.create_in(self.index_dir, self.schema)

        except Exception as e:
            logger.error(f"初始化Whoosh索引失败: {str(e)}")
            # Last resort: delete the (likely corrupt) index and recreate it.
            try:
                import shutil
                shutil.rmtree(self.index_dir, ignore_errors=True)
                os.makedirs(self.index_dir, exist_ok=True)
                self.index = index.create_in(self.index_dir, self.schema)
            except Exception as e2:
                logger.error(f"重新创建Whoosh索引失败: {str(e2)}")
                raise

    def add_document(self, document: SearchDocument) -> bool:
        """Add (or replace, by id) a document in the index.

        Args:
            document: document to index.

        Returns:
            True on success, False when Whoosh is unavailable or the
            write fails.
        """
        if not WHOOSH_AVAILABLE or not self.index:
            logger.warning("Whoosh不可用，无法添加文档")
            return False

        try:
            writer = self.index.writer()

            # update_document replaces any existing document with this id.
            writer.update_document(
                id=document.id,
                title=document.title,
                content=document.content,
                url=document.url,
                doc_type=document.doc_type,
                tags=','.join(document.tags),
                created_at=document.created_at,
                updated_at=document.updated_at,
                metadata=json.dumps(document.metadata)
            )

            writer.commit()
            logger.debug(f"文档添加到索引: {document.title} ({document.id})")
            return True

        except Exception as e:
            logger.error(f"添加文档到索引失败: {document.title}, 错误: {str(e)}")
            return False

    def search(self, query_string: str, limit: int = 10, 
               doc_type: Optional[str] = None, tags: Optional[List[str]] = None) -> SearchResult:
        """Search title and content for `query_string`.

        Args:
            query_string: user query, parsed against title + content.
            limit: maximum number of results.
            doc_type: optional exact doc_type filter.
            tags: optional tag filter (any of the given tags matches).

        Returns:
            A SearchResult; empty (with query_time recorded) on failure
            or when Whoosh is unavailable.
        """
        if not WHOOSH_AVAILABLE or not self.index:
            logger.warning("Whoosh不可用，返回空搜索结果")
            return SearchResult(documents=[], total_count=0, query_time=0.0)

        start_time = time.time()

        try:
            with self.index.searcher() as searcher:
                # Parse the user query over both title and content fields.
                parser = MultifieldParser(["title", "content"], self.index.schema)
                query = parser.parse(query_string)

                # AND in the optional doc_type/tags filters.
                if doc_type or tags:
                    filter_query = Every()

                    if doc_type:
                        filter_query = And([filter_query, Term("doc_type", doc_type)])

                    if tags:
                        tag_queries = [Term("tags", tag) for tag in tags]
                        filter_query = And([filter_query, Or(tag_queries)])

                    query = And([query, filter_query])

                results = searcher.search(query, limit=limit)

                # Rehydrate stored fields into SearchDocument objects.
                documents = []
                for result in results:
                    doc_data = dict(result)

                    document = SearchDocument(
                        id=doc_data['id'],
                        title=doc_data['title'],
                        content=doc_data['content'],
                        url=doc_data['url'],
                        doc_type=doc_data['doc_type'],
                        tags=doc_data['tags'].split(',') if doc_data['tags'] else [],
                        created_at=doc_data['created_at'],
                        updated_at=doc_data['updated_at'],
                        metadata=json.loads(doc_data['metadata']) if doc_data['metadata'] else {},
                        score=result.score
                    )

                    documents.append(document)

                query_time = time.time() - start_time

                logger.info(f"搜索完成: '{query_string}', 找到 {len(documents)} 个结果, 用时 {query_time:.3f}秒")

                return SearchResult(
                    documents=documents,
                    total_count=len(documents),
                    query_time=query_time
                )

        except Exception as e:
            logger.error(f"搜索失败: '{query_string}', 错误: {str(e)}")
            return SearchResult(documents=[], total_count=0, query_time=time.time() - start_time)

    def delete_document(self, doc_id: str) -> bool:
        """Delete a document from the index by id.

        Args:
            doc_id: document identifier.

        Returns:
            True on success, False when Whoosh is unavailable or the
            delete fails.
        """
        if not WHOOSH_AVAILABLE or not self.index:
            logger.warning("Whoosh不可用，无法删除文档")
            return False

        try:
            writer = self.index.writer()
            writer.delete_by_term('id', doc_id)
            writer.commit()

            logger.debug(f"文档从索引删除: {doc_id}")
            return True

        except Exception as e:
            logger.error(f"从索引删除文档失败: {doc_id}, 错误: {str(e)}")
            return False

    def get_index_stats(self) -> Dict[str, Any]:
        """Return index statistics (document count, fields, size on disk)."""
        if not WHOOSH_AVAILABLE or not self.index:
            return {'error': 'Whoosh不可用'}

        try:
            with self.index.reader() as reader:
                # BUGFIX: sum the sizes of the files inside the index
                # directory. os.path.getsize on the directory itself only
                # reports the directory entry, not the index contents.
                index_size = 0
                if os.path.isdir(self.index_dir):
                    for name in os.listdir(self.index_dir):
                        file_path = os.path.join(self.index_dir, name)
                        if os.path.isfile(file_path):
                            index_size += os.path.getsize(file_path)

                return {
                    'document_count': reader.doc_count(),
                    'field_count': len(reader.schema.names()),
                    'fields': list(reader.schema.names()),
                    'last_modified': reader.last_modified(),
                    'index_size': index_size
                }

        except Exception as e:
            logger.error(f"获取索引统计信息失败: {str(e)}")
            return {'error': str(e)}


class SearchEngineManager:
    """Facade combining the MCP crawler (content acquisition) with the
    Whoosh engine (indexing/search), plus an in-memory document cache."""

    def __init__(self, whoosh_index_dir: str = "whoosh_index", mcp_config: Optional[Dict[str, Any]] = None):
        """Build the underlying engine and crawler.

        Args:
            whoosh_index_dir: directory for the Whoosh index (created if missing).
            mcp_config: optional crawler configuration (see MCPCrawler).
        """
        self.whoosh_engine = WhooshSearchEngine(whoosh_index_dir)
        self.mcp_crawler = MCPCrawler(mcp_config)
        # Indexed documents by id. NOTE(review): unbounded — grows with
        # every indexed document for the process lifetime.
        self.documents_cache: Dict[str, SearchDocument] = {}
        self.logger = logging.getLogger(__name__)

        logger.info("搜索引擎管理器初始化完成")

    async def crawl_and_index(self, urls: List[str], max_depth: int = 2) -> List[str]:
        """Crawl each URL and index the resulting document.

        Args:
            urls: root URLs to crawl.
            max_depth: maximum crawl depth, applied for this call only.

        Returns:
            IDs of the documents that were successfully indexed.
        """
        indexed_docs: List[str] = []
        # BUGFIX: the original permanently overwrote the crawler's
        # configured max_depth; save and restore it so this call's
        # override does not leak into later calls.
        previous_max_depth = self.mcp_crawler.max_depth

        try:
            async with self.mcp_crawler as crawler:
                crawler.max_depth = max_depth

                for url in urls:
                    try:
                        document = await crawler.crawl_url(url)

                        if document:
                            # Only cache/report documents the index accepted.
                            if self.whoosh_engine.add_document(document):
                                indexed_docs.append(document.id)
                                self.documents_cache[document.id] = document
                                self.logger.info(f"爬取并索引成功: {url} -> {document.id}")

                        # Stop once the crawler has hit its page budget.
                        if len(crawler.visited_urls) >= crawler.max_pages:
                            break

                    except Exception as e:
                        self.logger.error(f"爬取并索引失败: {url}, 错误: {str(e)}")
                        continue
        finally:
            self.mcp_crawler.max_depth = previous_max_depth

        return indexed_docs

    def search_documents(self, query: str, limit: int = 10, **filters) -> SearchResult:
        """Search indexed documents.

        Args:
            query: search query string.
            limit: maximum number of results.
            **filters: forwarded to WhooshSearchEngine.search
                (``doc_type``, ``tags``).

        Returns:
            The engine's SearchResult.
        """
        return self.whoosh_engine.search(query, limit, **filters)

    def add_document(self, document: SearchDocument) -> bool:
        """Add a document to the index and, on success, to the cache.

        Args:
            document: document to add.

        Returns:
            True when indexing succeeded.
        """
        success = self.whoosh_engine.add_document(document)
        if success:
            self.documents_cache[document.id] = document
        return success

    def get_document(self, doc_id: str) -> Optional[SearchDocument]:
        """Return a cached document by id, or None (cache-only lookup)."""
        return self.documents_cache.get(doc_id)

    def delete_document(self, doc_id: str) -> bool:
        """Delete a document from the index and evict it from the cache."""
        success = self.whoosh_engine.delete_document(doc_id)
        if success and doc_id in self.documents_cache:
            del self.documents_cache[doc_id]
        return success

    def get_search_stats(self) -> Dict[str, Any]:
        """Return index statistics augmented with the cache size."""
        stats = self.whoosh_engine.get_index_stats()
        stats['cache_size'] = len(self.documents_cache)
        return stats


# Module-level singleton used by importers of this module.
# NOTE(review): instantiating at import time creates the "whoosh_index"
# directory as a side effect — confirm this is intended for all importers.
search_engine_manager = SearchEngineManager()


if __name__ == '__main__':
    # Manual smoke test: index a few sample documents, run some queries,
    # then print index statistics. (The redundant `import asyncio` was
    # removed — asyncio is already imported at module top.)

    async def test_search_engine():
        """Exercise add_document / search_documents / get_search_stats."""
        # Sample documents covering different topics and tags.
        test_docs = [
            SearchDocument(
                id="test_1",
                title="Python编程指南",
                content="Python是一种高级编程语言，具有简单易学、功能强大的特点。它广泛应用于Web开发、数据科学、人工智能等领域。",
                url="https://example.com/python-guide",
                doc_type="text",
                tags=["programming", "python", "tutorial"]
            ),
            SearchDocument(
                id="test_2",
                title="机器学习基础",
                content="机器学习是人工智能的一个分支，它使计算机能够从数据中学习并做出预测。常见的机器学习算法包括决策树、支持向量机等。",
                url="https://example.com/ml-basics",
                doc_type="text",
                tags=["machine-learning", "ai", "tutorial"]
            ),
            SearchDocument(
                id="test_3",
                title="Web开发最佳实践",
                content="Web开发涉及前端和后端技术。前端使用HTML、CSS、JavaScript，后端可以使用Python、Node.js等语言。",
                url="https://example.com/web-dev",
                doc_type="text",
                tags=["web", "development", "best-practices"]
            )
        ]

        # Index the sample documents.
        print("添加测试文档...")
        for doc in test_docs:
            success = search_engine_manager.add_document(doc)
            print(f"文档 '{doc.title}' 添加结果: {success}")

        # Run a query per topic and show scored results.
        print("\n搜索测试:")
        test_queries = ["Python编程", "机器学习", "Web开发", "人工智能"]

        for query in test_queries:
            print(f"\n搜索: '{query}'")
            result = search_engine_manager.search_documents(query, limit=5)

            print(f"找到 {result.total_count} 个结果，用时 {result.query_time:.3f}秒")
            for i, doc in enumerate(result.documents, 1):
                print(f"  {i}. {doc.title} (分数: {doc.score:.2f})")
                print(f"     {doc.content[:100]}...")

        # Dump index + cache statistics.
        print("\n搜索统计:")
        stats = search_engine_manager.get_search_stats()
        print(json.dumps(stats, indent=2, default=str))

    asyncio.run(test_search_engine())