import asyncio
import hashlib
import json
import logging
import re
import time
from collections import Counter
from dataclasses import asdict, dataclass
from datetime import datetime, timedelta
from enum import Enum
from typing import Dict, Any, Optional, List, Union
from urllib.parse import urljoin, urlparse

import aiohttp
import jieba
import requests
from bs4 import BeautifulSoup
from tenacity import retry, stop_after_attempt, wait_exponential
import whoosh
from whoosh import index
from whoosh.fields import Schema, TEXT, ID, DATETIME, KEYWORD
from whoosh.qparser import QueryParser, MultifieldParser
from whoosh.query import Every, Term, And, Or

from utils.logger import Logger
from utils.exceptions import CrawlerError, SearchEngineError
from states.circuit_breaker_manager import with_circuit_breaker

# Module-level logger. The original called get_logger(), which is never
# imported (only utils.logger.Logger is) and raised NameError at import
# time; use the stdlib logging factory instead.
logger = logging.getLogger("crawler_search_engine")


class CrawlerStatus(Enum):
    """Lifecycle states of a single crawl task."""
    PENDING = "pending"        # queued, not yet started
    RUNNING = "running"        # fetch in progress
    COMPLETED = "completed"    # page fetched and parsed successfully
    FAILED = "failed"          # non-200 response or unexpected error
    TIMEOUT = "timeout"        # request exceeded the configured timeout


class SearchType(Enum):
    """Search strategies supported by SearchEngine.search()."""
    FULLTEXT = "fulltext"      # keyword query against the Whoosh index
    SEMANTIC = "semantic"        # keyword-expanded query (simplified "semantic" search)
    HYBRID = "hybrid"          # weighted merge of fulltext + semantic scores
    REALTIME = "realtime"      # crawl the given URLs first, then run a hybrid search


@dataclass
class CrawlerConfig:
    """Tuning knobs for WebCrawler."""
    max_depth: int = 2                    # maximum link-following depth
    max_pages: int = 100                  # hard cap on total pages visited
    timeout: int = 30                     # per-request timeout in seconds
    concurrent_requests: int = 5          # max simultaneous in-flight requests
    user_agent: str = "MAESS-Crawler/1.0" # User-Agent header sent with requests
    respect_robots_txt: bool = True       # honour robots.txt (not read anywhere in this file — TODO confirm enforcement)
    delay_between_requests: float = 1.0   # politeness delay between requests, seconds
    enable_javascript: bool = False         # render JavaScript (not read anywhere in this file)
    max_retries: int = 3                  # max retry attempts (not read anywhere in this file)


@dataclass
class SearchConfig:
    """Tuning knobs for SearchEngine."""
    index_dir: str = "data/search_index"  # directory holding the Whoosh index
    max_results: int = 100                # default cap on returned results
    highlight: bool = True                # attach content highlights to hits
    fuzzy_match: bool = True              # fuzzy matching (not read anywhere in this file)
    semantic_weight: float = 0.3          # weight of the semantic score in hybrid search
    fulltext_weight: float = 0.7          # weight of the fulltext score in hybrid search
    realtime_timeout: int = 10            # realtime-search timeout, seconds (not read anywhere in this file)


@dataclass
class CrawlResult:
    """Outcome of crawling one URL."""
    url: str                     # the URL that was fetched
    title: str                   # page <title> text (empty string on failure)
    content: str                 # whitespace-normalised visible page text
    metadata: Dict[str, Any]     # response/page statistics (see WebCrawler.crawl_url)
    timestamp: datetime          # when this result was produced
    status: CrawlerStatus        # terminal status of the crawl
    error: Optional[str] = None  # error description when status is FAILED/TIMEOUT
    depth: int = 0               # link depth relative to the start URLs


@dataclass
class SearchResult:
    """One hit returned by SearchEngine."""
    url: str                               # indexed document URL
    title: str                             # stored document title
    content: str                           # stored content (may be truncated to ~500 chars)
    score: float                           # relevance score (re-weighted in hybrid search)
    metadata: Dict[str, Any]               # metadata decoded from the stored JSON field
    timestamp: datetime                    # stored index timestamp
    search_type: SearchType                # strategy that produced this hit
    highlights: Optional[List[str]] = None # highlight snippets when highlighting is enabled


class WebCrawler:
    """Asynchronous, depth- and rate-limited web crawler.

    Fetches pages with aiohttp, extracts visible text and metadata with
    BeautifulSoup, and tracks visited URLs to honour the configured
    ``max_pages``/``max_depth`` limits.
    """

    # Pre-compiled once; hoisted out of _extract_urls_from_content.
    _URL_PATTERN = re.compile(r'https?://[^\s<>"{}|\\^`\[\]]+')

    def __init__(self, config: Optional[CrawlerConfig] = None):
        self.config = config or CrawlerConfig()
        # The session is created lazily, inside a running event loop:
        # constructing aiohttp.ClientSession outside a coroutine is
        # deprecated and can bind the session to the wrong loop.
        self.session: Optional[aiohttp.ClientSession] = None
        self.visited_urls: set = set()
        self.crawl_results: List[CrawlResult] = []

    def _initialize_session(self) -> None:
        """Create the shared HTTP session if missing or closed (idempotent)."""
        if self.session is None or self.session.closed:
            self.session = aiohttp.ClientSession(
                timeout=aiohttp.ClientTimeout(total=self.config.timeout),
                headers={"User-Agent": self.config.user_agent}
            )

    @with_circuit_breaker("crawler")
    async def crawl_url(self, url: str, depth: int = 0) -> Optional[CrawlResult]:
        """Crawl a single URL.

        Returns:
            None for already-visited URLs or when ``depth`` exceeds the
            configured maximum; otherwise a CrawlResult whose status
            reflects success, failure or timeout.
        """
        try:
            if url in self.visited_urls:
                return None

            if depth > self.config.max_depth:
                return None

            self.visited_urls.add(url)

            # Ensure the session exists now that we are inside the loop.
            self._initialize_session()

            start = time.monotonic()
            async with self.session.get(url) as response:
                if response.status != 200:
                    return CrawlResult(
                        url=url,
                        title="",
                        content="",
                        metadata={"status_code": response.status},
                        timestamp=datetime.now(),
                        status=CrawlerStatus.FAILED,
                        error=f"HTTP {response.status}",
                        depth=depth
                    )

                content = await response.text()

                soup = BeautifulSoup(content, 'html.parser')

                # Page title; fall back to the URL when no <title> exists.
                title = soup.find('title')
                title_text = title.get_text().strip() if title else url

                # Remove non-content elements before text extraction.
                for tag in soup(["script", "style"]):
                    tag.decompose()

                # Collapse all runs of whitespace into single spaces.
                text_content = soup.get_text()
                lines = (line.strip() for line in text_content.splitlines())
                text = ' '.join(chunk for line in lines for chunk in line.split() if chunk)

                metadata = {
                    "url": url,
                    "depth": depth,
                    "content_length": len(text),
                    "title_length": len(title_text),
                    "links_found": len(soup.find_all('a', href=True)),
                    "images_found": len(soup.find_all('img')),
                    "status_code": response.status,
                    "content_type": response.headers.get('content-type', ''),
                    "last_modified": response.headers.get('last-modified', ''),
                    # Elapsed seconds for this request. (The original stored
                    # a wall-clock timestamp under this misleading key.)
                    "response_time": time.monotonic() - start
                }

                result = CrawlResult(
                    url=url,
                    title=title_text,
                    content=text,
                    metadata=metadata,
                    timestamp=datetime.now(),
                    status=CrawlerStatus.COMPLETED,
                    depth=depth
                )

                self.crawl_results.append(result)

                # Politeness delay between consecutive requests.
                await asyncio.sleep(self.config.delay_between_requests)

                return result

        except asyncio.TimeoutError:
            return CrawlResult(
                url=url,
                title="",
                content="",
                metadata={},
                timestamp=datetime.now(),
                status=CrawlerStatus.TIMEOUT,
                error="Request timeout",
                depth=depth
            )
        except Exception as e:
            logger.error(f"爬取失败: {url}, 错误: {str(e)}")
            return CrawlResult(
                url=url,
                title="",
                content="",
                metadata={},
                timestamp=datetime.now(),
                status=CrawlerStatus.FAILED,
                error=str(e),
                depth=depth
            )

    async def crawl_multiple_urls(self, urls: List[str]) -> List[CrawlResult]:
        """Crawl URLs concurrently, bounded by config.concurrent_requests.

        Exceptions raised by individual tasks are logged and dropped;
        only CrawlResult instances are returned.
        """
        semaphore = asyncio.Semaphore(self.config.concurrent_requests)

        async def crawl_with_semaphore(url: str, depth: int = 0):
            async with semaphore:
                return await self.crawl_url(url, depth)

        tasks = [crawl_with_semaphore(url) for url in urls]
        results = await asyncio.gather(*tasks, return_exceptions=True)

        # Filter out exceptions and None (skipped) entries.
        valid_results: List[CrawlResult] = []
        for result in results:
            if isinstance(result, CrawlResult):
                valid_results.append(result)
            elif isinstance(result, Exception):
                logger.error(f"爬取任务异常: {str(result)}")

        return valid_results

    async def crawl_with_depth(self, start_urls: List[str]) -> List[CrawlResult]:
        """Breadth-first crawl: fetch each depth level, then follow links.

        Stops when the frontier is empty or max_pages has been reached.
        """
        all_results: List[CrawlResult] = []
        current_urls = start_urls.copy()

        for depth in range(self.config.max_depth + 1):
            if not current_urls or len(self.visited_urls) >= self.config.max_pages:
                break

            logger.info(f"开始深度 {depth} 爬取, URL数量: {len(current_urls)}")

            results = await self.crawl_multiple_urls(current_urls)
            all_results.extend(results)

            # Build the next frontier from links found in successful pages,
            # respecting the overall page budget.
            next_urls: List[str] = []
            for result in results:
                if result.status == CrawlerStatus.COMPLETED and result.content:
                    urls = self._extract_urls_from_content(result.content, result.url)
                    for url in urls:
                        if url not in self.visited_urls and len(self.visited_urls) + len(next_urls) < self.config.max_pages:
                            next_urls.append(url)

            current_urls = next_urls

            # Extra delay between depth levels.
            if current_urls:
                await asyncio.sleep(self.config.delay_between_requests * 2)

        return all_results

    def _extract_urls_from_content(self, content: str, base_url: str) -> List[str]:
        """Extract up to 10 absolute http(s) URLs from raw page text.

        ``base_url`` is accepted for interface stability but not used:
        only absolute URLs are matched by the pattern.
        """
        urls: List[str] = []
        for url in self._URL_PATTERN.findall(content):
            try:
                # Keep only well-formed absolute URLs.
                parsed = urlparse(url)
                if parsed.scheme and parsed.netloc:
                    urls.append(url)
            except Exception:
                continue

        return urls[:10]  # cap to keep the crawl frontier small

    async def close(self):
        """Close the underlying HTTP session, if one was created."""
        if self.session:
            await self.session.close()
            self.session = None  # allow lazy re-creation on reuse


class SearchEngine:
    """Whoosh-backed search engine with jieba keyword extraction."""

    def __init__(self, config: Optional[SearchConfig] = None):
        self.config = config or SearchConfig()
        self.index = None
        self.writer = None  # unused placeholder; per-operation writers are created below
        self._initialize_index()

    def _initialize_index(self):
        """Create or open the Whoosh index under config.index_dir.

        Raises:
            SearchEngineError: if the directory or index cannot be set up.
        """
        try:
            import os
            os.makedirs(self.config.index_dir, exist_ok=True)

            # Index schema; title gets a boost so title matches rank higher.
            schema = Schema(
                url=ID(stored=True, unique=True),
                title=TEXT(stored=True, field_boost=2.0),
                content=TEXT(stored=True),
                metadata=TEXT(stored=True),
                timestamp=DATETIME(stored=True),
                keywords=KEYWORD(stored=True, lowercase=True, commas=True),
                search_text=TEXT(stored=True)  # combined text used for searching
            )

            # Open the existing index, or create a fresh one.
            if index.exists_in(self.config.index_dir):
                self.index = index.open_dir(self.config.index_dir)
            else:
                self.index = index.create_in(self.config.index_dir, schema)

            logger.info("搜索引擎索引初始化完成")

        except Exception as e:
            logger.error(f"搜索引擎索引初始化失败: {str(e)}")
            raise SearchEngineError(f"搜索引擎索引初始化失败: {str(e)}")

    async def index_document(self, crawl_result: CrawlResult) -> bool:
        """Index (or re-index) a crawl result. Returns True on success."""
        # Bound before try: the original referenced `writer` in the except
        # block, raising UnboundLocalError when index.writer() itself failed.
        writer = None
        try:
            writer = self.index.writer()

            keywords = self._extract_keywords(crawl_result.content)

            # Combined field so one query can match title, body and keywords.
            search_text = f"{crawl_result.title} {crawl_result.content} {' '.join(keywords)}"

            # update_document replaces any existing document with the same
            # unique url instead of accumulating duplicates (the realtime
            # path re-indexes already-crawled URLs).
            writer.update_document(
                url=crawl_result.url,
                title=crawl_result.title,
                content=crawl_result.content,
                metadata=json.dumps(crawl_result.metadata),
                timestamp=crawl_result.timestamp,
                keywords=','.join(keywords),
                search_text=search_text
            )

            writer.commit()
            logger.info(f"文档索引完成: {crawl_result.url}")
            return True

        except Exception as e:
            logger.error(f"文档索引失败: {str(e)}")
            if writer is not None:
                writer.cancel()
            return False

    def _extract_keywords(self, text: str, max_keywords: int = 20) -> List[str]:
        """Return the most frequent jieba tokens, longest-first by count.

        Single characters and pure digits are filtered out. Returns an
        empty list if tokenisation fails.
        """
        try:
            tokens = (word.strip() for word in jieba.cut(text))
            freq = Counter(
                word for word in tokens
                if len(word) > 1 and not word.isdigit()
            )
            return [word for word, _ in freq.most_common(max_keywords)]

        except Exception as e:
            logger.error(f"关键词提取失败: {str(e)}")
            return []

    @with_circuit_breaker("crawler")
    async def search(self, query: str, search_type: SearchType = SearchType.FULLTEXT,
                    max_results: Optional[int] = None) -> List[SearchResult]:
        """Dispatch a search by strategy; unknown types fall back to fulltext.

        Raises:
            SearchEngineError: on any underlying search failure.
        """
        try:
            max_results_val = max_results or self.config.max_results

            if search_type == SearchType.FULLTEXT:
                return await self._fulltext_search(query, max_results_val)
            elif search_type == SearchType.SEMANTIC:
                return await self._semantic_search(query, max_results_val)
            elif search_type == SearchType.HYBRID:
                return await self._hybrid_search(query, max_results_val)
            else:
                # REALTIME (and anything unexpected) falls back to fulltext;
                # realtime crawling is driven by realtime_search() directly.
                return await self._fulltext_search(query, max_results_val)

        except Exception as e:
            logger.error(f"搜索失败: {str(e)}")
            raise SearchEngineError(f"搜索失败: {str(e)}")

    async def _fulltext_search(self, query: str, max_results: int) -> List[SearchResult]:
        """Run a Whoosh multifield query over title/content/keywords."""
        try:
            with self.index.searcher() as searcher:
                parser = MultifieldParser(
                    ["title", "content", "keywords"],
                    self.index.schema
                )

                parsed_query = parser.parse(query)

                results = searcher.search(parsed_query, limit=max_results)

                search_results = []
                for hit in results:
                    # Optional highlight snippets from the content field.
                    highlights = []
                    if self.config.highlight:
                        highlights = hit.highlights("content", top=3)

                    result = SearchResult(
                        url=hit["url"],
                        title=hit["title"],
                        # Truncate long content for display.
                        content=(hit["content"][:500] + "...") if len(hit["content"]) > 500 else hit["content"],
                        score=float(hit.score),
                        metadata=json.loads(hit["metadata"]) if hit.get("metadata") else {},
                        timestamp=hit["timestamp"],
                        search_type=SearchType.FULLTEXT,
                        highlights=highlights if highlights else None
                    )
                    search_results.append(result)

                logger.info(f"全文搜索完成: 查询='{query}', 结果数量={len(search_results)}")
                return search_results

        except Exception as e:
            logger.error(f"全文搜索失败: {str(e)}")
            raise SearchEngineError(f"全文搜索失败: {str(e)}")

    async def _semantic_search(self, query: str, max_results: int) -> List[SearchResult]:
        """Simplified "semantic" search: keyword-expand the query, then
        run a fulltext search with the expanded query."""
        try:
            query_keywords = self._extract_keywords(query, max_keywords=10)

            extended_query = f"{query} {' '.join(query_keywords)}"

            return await self._fulltext_search(extended_query, max_results)

        except Exception as e:
            logger.error(f"语义搜索失败: {str(e)}")
            raise SearchEngineError(f"语义搜索失败: {str(e)}")

    async def _hybrid_search(self, query: str, max_results: int) -> List[SearchResult]:
        """Merge fulltext and semantic results with configured weights.

        NOTE: scores are re-weighted in place on the SearchResult objects.
        """
        try:
            fulltext_results = await self._fulltext_search(query, max_results)
            semantic_results = await self._semantic_search(query, max_results)

            all_results: Dict[str, SearchResult] = {}

            # Fulltext results enter with the fulltext weight applied.
            for result in fulltext_results:
                if result.url not in all_results:
                    all_results[result.url] = result
                    all_results[result.url].score *= self.config.fulltext_weight

            # Semantic hits boost existing entries, or enter weighted.
            for result in semantic_results:
                if result.url in all_results:
                    all_results[result.url].score += result.score * self.config.semantic_weight
                else:
                    result.score *= self.config.semantic_weight
                    all_results[result.url] = result

            sorted_results = sorted(all_results.values(), key=lambda x: x.score, reverse=True)

            # Re-tag the results we actually return.
            for result in sorted_results[:max_results]:
                result.search_type = SearchType.HYBRID

            logger.info(f"混合搜索完成: 查询='{query}', 结果数量={len(sorted_results)}")
            return sorted_results[:max_results]

        except Exception as e:
            logger.error(f"混合搜索失败: {str(e)}")
            raise SearchEngineError(f"混合搜索失败: {str(e)}")

    async def realtime_search(self, query: str, urls: List[str],
                            max_results: Optional[int] = None) -> List[SearchResult]:
        """Crawl the given URLs, index the results, then run a hybrid search.

        Raises:
            SearchEngineError: on crawl/index/search failure.
        """
        try:
            max_results_val = max_results or self.config.max_results

            # Fetch the requested URLs with a throwaway crawler.
            crawler = WebCrawler()
            crawl_results = await crawler.crawl_multiple_urls(urls)
            await crawler.close()

            # Index only the successfully crawled documents.
            indexed_count = 0
            for result in crawl_results:
                if result.status == CrawlerStatus.COMPLETED:
                    success = await self.index_document(result)
                    if success:
                        indexed_count += 1

            logger.info(f"实时搜索索引完成: 索引了 {indexed_count} 个文档")

            search_results = await self.search(query, SearchType.HYBRID, max_results_val)

            # Tag results as coming from the realtime path.
            for result in search_results:
                result.search_type = SearchType.REALTIME

            return search_results

        except Exception as e:
            logger.error(f"实时搜索失败: {str(e)}")
            raise SearchEngineError(f"实时搜索失败: {str(e)}")

    async def delete_document(self, url: str) -> bool:
        """Delete the document stored under ``url``. Returns True on success."""
        # Bound before try — same UnboundLocalError hazard as index_document.
        writer = None
        try:
            writer = self.index.writer()
            writer.delete_by_term('url', url)
            writer.commit()

            logger.info(f"文档删除完成: {url}")
            return True

        except Exception as e:
            logger.error(f"文档删除失败: {str(e)}")
            if writer is not None:
                writer.cancel()
            return False

    async def get_document_count(self) -> int:
        """Return the number of indexed documents (0 on error)."""
        try:
            with self.index.searcher() as searcher:
                return searcher.doc_count()
        except Exception as e:
            logger.error(f"获取文档数量失败: {str(e)}")
            return 0

    async def get_index_stats(self) -> Dict[str, Any]:
        """Return basic index statistics ({} on error)."""
        try:
            with self.index.searcher() as searcher:
                return {
                    "document_count": searcher.doc_count(),
                    "field_count": len(self.index.schema.names()),
                    "schema_fields": list(self.index.schema.names()),
                    "last_modified": datetime.now().isoformat()
                }
        except Exception as e:
            logger.error(f"获取索引统计失败: {str(e)}")
            return {}


class CrawlerSearchEngine:
    """Facade combining WebCrawler and SearchEngine into one pipeline."""

    def __init__(self, crawler_config: Optional[CrawlerConfig] = None,
                 search_config: Optional[SearchConfig] = None):
        self.crawler_config = crawler_config or CrawlerConfig()
        self.search_config = search_config or SearchConfig()
        self.search_engine = SearchEngine(self.search_config)
        self.web_crawler = WebCrawler(self.crawler_config)

    async def crawl_and_index(self, urls: List[str], with_depth: bool = False) -> Dict[str, Any]:
        """Crawl the given URLs and index the successful results.

        Args:
            urls: start URLs to crawl.
            with_depth: follow links breadth-first up to the configured depth.

        Returns:
            Summary dict with counts and the serialized crawl results.

        Raises:
            CrawlerError: on any crawl/index failure.
        """
        try:
            logger.info(f"开始爬取和索引: URL数量={len(urls)}")

            if with_depth:
                crawl_results = await self.web_crawler.crawl_with_depth(urls)
            else:
                crawl_results = await self.web_crawler.crawl_multiple_urls(urls)

            # Index each completed result; everything else counts as failed.
            indexed_count = 0
            failed_count = 0

            for result in crawl_results:
                if result.status == CrawlerStatus.COMPLETED:
                    success = await self.search_engine.index_document(result)
                    if success:
                        indexed_count += 1
                    else:
                        failed_count += 1
                else:
                    failed_count += 1

            logger.info(f"爬取和索引完成: 成功={indexed_count}, 失败={failed_count}")

            return {
                "total_urls": len(urls),
                "crawled_count": len(crawl_results),
                "indexed_count": indexed_count,
                "failed_count": failed_count,
                # asdict comes from dataclasses; the original module never
                # imported it, so this line raised NameError at runtime.
                "crawl_results": [asdict(result) for result in crawl_results]
            }

        except Exception as e:
            logger.error(f"爬取和索引失败: {str(e)}")
            raise CrawlerError(f"爬取和索引失败: {str(e)}")

    async def search_with_crawl(self, query: str, urls: List[str],
                              search_type: SearchType = SearchType.HYBRID,
                              max_results: Optional[int] = None) -> List[SearchResult]:
        """Search locally first; crawl+search the given URLs when sparse.

        Raises:
            SearchEngineError: on any search failure.
        """
        try:
            # Effective cap. (The original sliced with the raw max_results,
            # returning everything when the caller passed None.)
            limit = max_results or self.search_config.max_results

            local_results = await self.search_engine.search(query, search_type, max_results)

            # Fewer than half the requested results locally → go realtime.
            if len(local_results) < limit // 2:
                logger.info(f"本地结果不足({len(local_results)}), 开始实时搜索")
                realtime_results = await self.search_engine.realtime_search(query, urls, max_results)

                # Merge, preferring the local result when URLs collide.
                combined_results: Dict[str, SearchResult] = {}
                for result in local_results:
                    combined_results[result.url] = result
                for result in realtime_results:
                    combined_results.setdefault(result.url, result)

                sorted_results = sorted(combined_results.values(), key=lambda x: x.score, reverse=True)
                return sorted_results[:limit]
            else:
                return local_results

        except Exception as e:
            logger.error(f"搜索并爬取失败: {str(e)}")
            raise SearchEngineError(f"搜索并爬取失败: {str(e)}")

    async def get_search_stats(self) -> Dict[str, Any]:
        """Return index stats plus the active configurations ({} on error)."""
        try:
            index_stats = await self.search_engine.get_index_stats()

            return {
                "search_engine": index_stats,
                "crawler_config": asdict(self.crawler_config),
                "search_config": asdict(self.search_config),
                "timestamp": datetime.now().isoformat()
            }

        except Exception as e:
            logger.error(f"获取搜索统计失败: {str(e)}")
            return {}

    async def close(self):
        """Release crawler resources (HTTP session)."""
        try:
            await self.web_crawler.close()
            logger.info("爬虫搜索引擎已关闭")
        except Exception as e:
            logger.error(f"关闭爬虫搜索引擎失败: {str(e)}")


# Lazily-created module-level singleton.
crawler_search_engine = None


async def get_crawler_search_engine() -> CrawlerSearchEngine:
    """Return the process-wide CrawlerSearchEngine, creating it on first call."""
    global crawler_search_engine
    if crawler_search_engine is not None:
        return crawler_search_engine
    crawler_search_engine = CrawlerSearchEngine()
    return crawler_search_engine