import asyncio
import json
import time
from collections import deque
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional, Union
from urllib.parse import urljoin, urlparse

from app.core.logging import get_logger

try:
    from crawl4ai import AsyncWebCrawler
    from crawl4ai.extraction_strategy import LLMExtractionStrategy, CosineStrategy
    from crawl4ai.chunking_strategy import RegexChunking
    CRAWL4AI_AVAILABLE = True
except ImportError:
    CRAWL4AI_AVAILABLE = False
    AsyncWebCrawler = None
    LLMExtractionStrategy = None
    CosineStrategy = None
    RegexChunking = None

from app.schemas.crawler import CrawlerConfigBase

logger = get_logger(__name__)


class CrawlerTool:
    """Web crawling tool built on top of crawl4ai.

    Wraps an ``AsyncWebCrawler`` session and exposes helpers for crawling a
    single page, a list of pages, and a bounded breadth-first crawl that
    follows same-domain links.  Results are returned as plain dicts so that
    callers do not depend on crawl4ai's result objects.

    Can be used as an async context manager::

        async with CrawlerTool() as tool:
            data = await tool.crawl_single_page("https://example.com")
    """

    def __init__(self):
        # Lazily created AsyncWebCrawler; stays None until start_session().
        self.crawler = None
        self._session_active = False

    async def __aenter__(self):
        """Async context manager entry: start the crawler session."""
        if not CRAWL4AI_AVAILABLE:
            raise ImportError("crawl4ai is not available. Please install it first.")

        await self.start_session()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Async context manager exit: always close the crawler session."""
        await self.close_session()

    async def start_session(self):
        """Start the underlying crawler session (idempotent).

        Raises:
            ImportError: if crawl4ai is not installed.
        """
        if not CRAWL4AI_AVAILABLE:
            raise ImportError("crawl4ai is not available. Please install it first.")

        if not self._session_active:
            self.crawler = AsyncWebCrawler(verbose=True)
            # NOTE(review): astart/aclose names depend on the installed
            # crawl4ai version (newer releases expose start()/close()) —
            # confirm against the pinned version.
            await self.crawler.astart()
            self._session_active = True
            logger.info("Crawler session started")

    async def close_session(self):
        """Close the crawler session if one is active (idempotent)."""
        if self._session_active and self.crawler:
            await self.crawler.aclose()
            self._session_active = False
            # Drop the closed crawler so a later start_session() builds a
            # fresh one instead of reusing a dead instance.
            self.crawler = None
            logger.info("Crawler session closed")

    def _build_crawler_config(self, config: Optional[CrawlerConfigBase] = None) -> Dict[str, Any]:
        """Translate a high-level config into crawl4ai keyword arguments.

        Args:
            config: optional crawl configuration; when omitted only the
                built-in defaults are returned.

        Returns:
            Keyword arguments suitable for ``AsyncWebCrawler.arun``.
        """
        crawler_config: Dict[str, Any] = {
            "word_count_threshold": 10,
            "only_text": False,
            "bypass_cache": True,
        }

        if not config:
            return crawler_config

        # JavaScript execution
        if config.javascript_enabled:
            crawler_config["js_code"] = []
            if config.wait_for:
                crawler_config["wait_for"] = config.wait_for

        # Request headers / user agent
        if config.headers:
            crawler_config["headers"] = config.headers

        if config.user_agent:
            crawler_config["user_agent"] = config.user_agent

        # Delay before the HTML snapshot is taken
        if config.delay:
            crawler_config["delay_before_return_html"] = config.delay

        return crawler_config

    @staticmethod
    def _extract_links(result) -> List[Any]:
        """Flatten crawl4ai's link structure into a single list (best effort)."""
        try:
            if isinstance(result.links, dict):
                # crawl4ai groups links into internal/external buckets.
                internal_links = result.links.get('internal', [])
                external_links = result.links.get('external', [])
                return internal_links + external_links
            if isinstance(result.links, list):
                return result.links
        except Exception as e:
            logger.warning(f"Error extracting links: {e}")
        return []

    @staticmethod
    def _extract_images(result) -> List[Any]:
        """Pull image entries out of crawl4ai's media structure (best effort)."""
        try:
            if isinstance(result.media, dict):
                return result.media.get('images', [])
            if isinstance(result.media, list):
                return result.media
        except Exception as e:
            logger.warning(f"Error extracting images: {e}")
        return []

    @staticmethod
    def _build_metadata(result, extracted_data: Dict[str, Any]) -> Dict[str, Any]:
        """Merge page-supplied metadata with basic crawl statistics."""
        metadata: Dict[str, Any] = {}
        if hasattr(result, 'metadata') and result.metadata:
            metadata.update(result.metadata)

        metadata.update({
            "content_length": len(extracted_data["content"]),
            "links_count": len(extracted_data["links"]),
            "images_count": len(extracted_data["images"]),
            # Timezone-aware replacement for the deprecated datetime.utcnow().
            "crawled_at": datetime.now(timezone.utc).isoformat()
        })
        return metadata

    @staticmethod
    def _extract_custom_data(result, config: CrawlerConfigBase) -> Optional[Dict[str, List[str]]]:
        """Extract text for each configured CSS selector; None on failure."""
        try:
            from bs4 import BeautifulSoup
            soup = BeautifulSoup(result.html, 'html.parser')
            custom_data: Dict[str, List[str]] = {}

            for key, selector in config.css_selectors.items():
                elements = soup.select(selector)
                if elements:
                    custom_data[key] = [elem.get_text(strip=True) for elem in elements]

            return custom_data
        except Exception as e:
            logger.warning(f"Error extracting custom data with CSS selectors: {e}")
            return None

    def _extract_content(self, result, config: Optional[CrawlerConfigBase] = None) -> Dict[str, Any]:
        """Convert a crawl4ai result object into a plain result dict.

        Extraction of text/links/images/metadata/custom selectors is gated by
        the corresponding flags on ``config``; without a config only the basic
        status fields are populated.
        """
        extracted_data: Dict[str, Any] = {
            "url": result.url,
            "title": getattr(result, 'title', '') or '',
            "content": "",
            "metadata": {},
            "links": [],
            "images": [],
            "status_code": getattr(result, 'status_code', None),
            "success": result.success,
            "error_message": getattr(result, 'error_message', None)
        }

        if not result.success:
            return extracted_data

        # Text content: prefer cleaned HTML, fall back to markdown rendering.
        if config and config.extract_text:
            extracted_data["content"] = result.cleaned_html or result.markdown or ""

        if config and config.extract_links and hasattr(result, 'links'):
            extracted_data["links"] = self._extract_links(result)

        if config and config.extract_images and hasattr(result, 'media'):
            extracted_data["images"] = self._extract_images(result)

        if config and config.extract_metadata:
            extracted_data["metadata"] = self._build_metadata(result, extracted_data)

        # Custom CSS-selector extraction; the key is only present on success.
        if config and config.css_selectors and hasattr(result, 'html'):
            custom_data = self._extract_custom_data(result, config)
            if custom_data is not None:
                extracted_data["custom_data"] = custom_data

        return extracted_data

    async def crawl_single_page(
        self,
        url: str,
        config: Optional[CrawlerConfigBase] = None
    ) -> Dict[str, Any]:
        """Crawl a single page and return the extracted result dict.

        Never raises: on failure a dict with ``success=False`` and an
        ``error_message`` is returned instead.
        """
        if not self._session_active:
            await self.start_session()

        start_time = time.time()

        try:
            crawler_config = self._build_crawler_config(config)

            logger.info(f"Starting to crawl: {url}")
            result = await self.crawler.arun(url=url, **crawler_config)

            extracted_data = self._extract_content(result, config)

            execution_time = time.time() - start_time
            extracted_data["execution_time"] = execution_time

            logger.info(f"Successfully crawled {url} in {execution_time:.2f}s")
            return extracted_data

        except Exception as e:
            execution_time = time.time() - start_time
            error_msg = f"Error crawling {url}: {str(e)}"
            logger.error(error_msg)

            return {
                "url": url,
                "success": False,
                "error_message": error_msg,
                "execution_time": execution_time,
                "title": "",
                "content": "",
                "metadata": {},
                "links": [],
                "images": []
            }

    async def crawl_multiple_pages(
        self,
        urls: List[str],
        config: Optional[CrawlerConfigBase] = None
    ) -> List[Dict[str, Any]]:
        """Crawl a list of pages sequentially with a politeness delay.

        Args:
            urls: pages to crawl, in order.
            config: optional crawl configuration; ``config.delay`` overrides
                the default 1s inter-request delay (an explicit 0 disables it).
        """
        if not self._session_active:
            await self.start_session()

        results: List[Dict[str, Any]] = []
        # `is not None` (not truthiness) so an explicit delay of 0 is honored
        # instead of silently falling back to the 1.0s default.
        delay = config.delay if config and config.delay is not None else 1.0

        for i, url in enumerate(urls):
            try:
                result = await self.crawl_single_page(url, config)
                results.append(result)

                # Throttle between requests; skip the delay after the last URL.
                if i < len(urls) - 1:
                    await asyncio.sleep(delay)

            except Exception as e:
                logger.error(f"Error crawling {url}: {e}")
                results.append({
                    "url": url,
                    "success": False,
                    "error_message": str(e),
                    "execution_time": 0,
                    "title": "",
                    "content": "",
                    "metadata": {},
                    "links": [],
                    "images": []
                })

        return results

    async def crawl_with_depth(
        self,
        start_url: str,
        config: Optional[CrawlerConfigBase] = None,
        max_depth: int = 2,
        max_pages: int = 10
    ) -> List[Dict[str, Any]]:
        """Breadth-first crawl that follows same-domain links.

        Args:
            start_url: page to start from.
            config: optional crawl configuration; include/exclude patterns
                filter which discovered links are followed.
            max_depth: maximum link depth from the start page.
            max_pages: hard cap on the number of pages crawled.
        """
        if not self._session_active:
            await self.start_session()

        visited_urls = set()
        results: List[Dict[str, Any]] = []
        # deque gives O(1) popleft for the BFS frontier (list.pop(0) is O(n)).
        queue = deque([(start_url, 0)])  # (url, depth)
        base_domain = urlparse(start_url).netloc

        while queue and len(results) < max_pages:
            current_url, current_depth = queue.popleft()

            if current_url in visited_urls or current_depth > max_depth:
                continue

            visited_urls.add(current_url)

            page_result = await self.crawl_single_page(current_url, config)
            results.append(page_result)

            # Below max depth, enqueue this page's links for later visits.
            if current_depth < max_depth and page_result.get("success", False):
                for link in page_result.get("links", []):
                    try:
                        # crawl4ai may report links as dicts ({'href': ...})
                        # or plain strings — normalize to an href string.
                        href = link.get("href") if isinstance(link, dict) else link
                        if not href:
                            continue

                        # Resolve relative links; skip non-http schemes
                        # (mailto:, javascript:, fragments, ...).
                        if href.startswith('/'):
                            full_url = urljoin(current_url, href)
                        elif href.startswith('http'):
                            full_url = href
                        else:
                            continue

                        # Filtering rules from the config.
                        if config and config.include_patterns:
                            if not any(pattern in full_url for pattern in config.include_patterns):
                                continue

                        if config and config.exclude_patterns:
                            if any(pattern in full_url for pattern in config.exclude_patterns):
                                continue

                        # Only follow links on the start URL's domain.
                        if urlparse(full_url).netloc == base_domain and full_url not in visited_urls:
                            queue.append((full_url, current_depth + 1))

                    except Exception as e:
                        logger.warning(f"Error processing link {link}: {e}")

            # Politeness delay between pages.
            if config and config.delay:
                await asyncio.sleep(config.delay)

        return results

    def is_available(self) -> bool:
        """Return True if the crawl4ai dependency is importable."""
        return CRAWL4AI_AVAILABLE


# Module-level singleton crawler instance shared by importers.
crawler_tool = CrawlerTool()