import asyncio
import time
import json
import zlib
from typing import Dict, Any, List, Optional, Union
from urllib.parse import urljoin, urlparse
from datetime import datetime
from functools import wraps
import aiohttp
from aiohttp import ClientSession, ClientTimeout
import redis
from app.core.logging import get_logger

try:
    from crawl4ai import AsyncWebCrawler
    from crawl4ai.extraction_strategy import LLMExtractionStrategy, CosineStrategy
    from crawl4ai.chunking_strategy import RegexChunking
    CRAWL4AI_AVAILABLE = True
except ImportError:
    CRAWL4AI_AVAILABLE = False
    AsyncWebCrawler = None
    LLMExtractionStrategy = None
    CosineStrategy = None
    RegexChunking = None

from app.schemas.crawler import CrawlerConfigBase

logger = get_logger(__name__)


class RateLimiter:
    """Sliding-window rate limiter for outbound crawl requests."""

    def __init__(self, max_requests: int = 10, time_window: float = 60.0):
        # Maximum number of requests allowed inside one rolling window.
        self.max_requests = max_requests
        # Window length in seconds.
        self.time_window = time_window
        # Timestamps of requests issued within the current window.
        self.requests = []

    async def acquire(self):
        """Block until a request slot is free, then record the request."""
        now = time.time()
        # Discard timestamps that have aged out of the window.
        self.requests = [stamp for stamp in self.requests if now - stamp < self.time_window]

        if len(self.requests) >= self.max_requests:
            # Sleep exactly long enough for the oldest recorded request
            # to fall outside the window.
            wait_time = self.time_window - (now - min(self.requests))
            if wait_time > 0:
                await asyncio.sleep(wait_time)
                # Refresh the clock and prune again after the wait.
                now = time.time()
                self.requests = [stamp for stamp in self.requests if now - stamp < self.time_window]

        self.requests.append(now)


class ProxyManager:
    """Round-robin rotation over a pool of proxy URLs."""

    def __init__(self, proxies: List[str] = None):
        # A falsy argument (None or empty) yields an empty pool.
        self.proxies = proxies or []
        # Index of the proxy handed out by the next get_proxy() call.
        self.current_index = 0

    def get_proxy(self) -> Optional[str]:
        """Return the next proxy in rotation, or None when the pool is empty."""
        if not self.proxies:
            return None

        chosen = self.proxies[self.current_index]
        # Advance and wrap around the end of the pool.
        self.current_index = (self.current_index + 1) % len(self.proxies)
        return chosen


class ContentCompressor:
    """zlib-based helpers for compressing crawled page content."""

    @staticmethod
    def compress(content: str) -> bytes:
        """UTF-8-encode then zlib-compress *content*."""
        return zlib.compress(content.encode('utf-8'))

    @staticmethod
    def decompress(compressed_content: bytes) -> str:
        """Inverse of compress(): zlib-decompress then UTF-8-decode."""
        return zlib.decompress(compressed_content).decode('utf-8')

    @staticmethod
    def chunk_content(content: str, chunk_size: int = 1024 * 1024) -> List[bytes]:
        """Split *content* into chunk_size-character pieces, compressing each.

        Note the split is by characters (pre-encoding), so each compressed
        chunk decompresses back to a chunk_size-character string slice.
        """
        return [
            ContentCompressor.compress(content[offset:offset + chunk_size])
            for offset in range(0, len(content), chunk_size)
        ]


class CacheManager:
    """Redis-backed cache that transparently zlib-compresses values."""

    def __init__(self, redis_url: str = "redis://localhost:6379/3"):
        self.redis_client = redis.from_url(redis_url)
        # Entries expire after one hour unless a TTL is supplied explicitly.
        self.default_ttl = 3600

    def get(self, key: str) -> Optional[str]:
        """Fetch and decompress a cached value; None on miss or any error."""
        try:
            payload = self.redis_client.get(key)
            if payload:
                return ContentCompressor.decompress(payload)
        except Exception as e:
            # Cache failures are non-fatal: log and behave like a miss.
            logger.warning(f"Cache get error: {e}")
        return None

    def set(self, key: str, value: str, ttl: int = None):
        """Compress and store *value* under *key* with a TTL (best effort)."""
        try:
            expiry = ttl or self.default_ttl
            self.redis_client.setex(key, expiry, ContentCompressor.compress(value))
        except Exception as e:
            logger.warning(f"Cache set error: {e}")

    def delete(self, key: str):
        """Remove *key* from the cache (best effort)."""
        try:
            self.redis_client.delete(key)
        except Exception as e:
            logger.warning(f"Cache delete error: {e}")


class PerformanceTracker:
    """Accumulates aggregate performance statistics across requests."""

    def __init__(self):
        # Running totals; derived values are computed on demand.
        self.metrics = {
            "requests_count": 0,
            "total_time": 0.0,
            "success_count": 0,
            "error_count": 0,
            "bytes_transferred": 0
        }

    def record_request(self, execution_time: float, success: bool, bytes_count: int = 0):
        """Fold one request's outcome into the running totals."""
        totals = self.metrics
        totals["requests_count"] += 1
        totals["total_time"] += execution_time
        outcome_key = "success_count" if success else "error_count"
        totals[outcome_key] += 1
        totals["bytes_transferred"] += bytes_count

    def get_metrics(self) -> Dict[str, Any]:
        """Return a snapshot of the totals plus derived average/success rate."""
        snapshot = dict(self.metrics)
        count = snapshot["requests_count"]
        # Guard against division by zero when nothing has been recorded yet.
        snapshot["average_time"] = snapshot["total_time"] / count if count > 0 else 0
        snapshot["success_rate"] = snapshot["success_count"] / count if count > 0 else 0
        return snapshot


def retry_on_failure(max_retries: int = 3, delay: float = 1.0):
    """Decorator retrying an async callable with exponential backoff.

    The wrapped coroutine is attempted up to ``max_retries + 1`` times;
    the wait before attempt *n* is ``delay * 2**(n-1)`` seconds. The last
    exception is re-raised once all attempts are exhausted.
    """
    def decorator(func):
        @wraps(func)
        async def wrapper(*args, **kwargs):
            last_exception = None
            for attempt in range(max_retries + 1):
                try:
                    return await func(*args, **kwargs)
                except Exception as exc:
                    last_exception = exc
                    if attempt >= max_retries:
                        logger.error(f"All {max_retries + 1} attempts failed. Last error: {exc}")
                    else:
                        logger.warning(f"Attempt {attempt + 1} failed: {exc}. Retrying in {delay} seconds...")
                        # Exponential backoff between attempts.
                        await asyncio.sleep(delay * (2 ** attempt))
            raise last_exception
        return wrapper
    return decorator


class OptimizedCrawlerTool:
    """Optimized web crawler tool.

    Wraps crawl4ai's ``AsyncWebCrawler`` and adds rate limiting, proxy
    rotation, Redis-backed response caching, performance tracking, and
    retry with exponential backoff. Usable as an async context manager.
    """
    
    def __init__(self, redis_url: str = "redis://localhost:6379/3"):
        self.crawler = None  # AsyncWebCrawler, created lazily in start_session()
        self._session_active = False  # guards against double start/close
        self.rate_limiter = RateLimiter(max_requests=10, time_window=60.0)
        self.proxy_manager = ProxyManager()  # NOTE(review): pool is empty and never consulted below
        self.cache_manager = CacheManager(redis_url)
        self.performance_tracker = PerformanceTracker()
        self.session = None  # aiohttp ClientSession, created in start_session()
        self.timeout = ClientTimeout(total=30)
    
    async def __aenter__(self):
        """Async context manager entry: start the crawler session."""
        if not CRAWL4AI_AVAILABLE:
            raise ImportError("crawl4ai is not available. Please install it first.")
        
        await self.start_session()
        return self
    
    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Async context manager exit: close the crawler session."""
        await self.close_session()
    
    async def start_session(self):
        """Start the crawl4ai crawler and the auxiliary aiohttp session.

        Idempotent: a no-op when a session is already active.

        Raises:
            ImportError: if crawl4ai could not be imported.
        """
        if not CRAWL4AI_AVAILABLE:
            raise ImportError("crawl4ai is not available. Please install it first.")
        
        if not self._session_active:
            self.crawler = AsyncWebCrawler(verbose=True)
            # NOTE(review): relies on crawl4ai exposing astart()/aclose();
            # confirm against the installed crawl4ai version.
            await self.crawler.astart()
            # Create an aiohttp session for auxiliary features
            self.session = ClientSession(timeout=self.timeout)
            self._session_active = True
            logger.info("Optimized crawler session started")
    
    async def close_session(self):
        """Close the crawler and aiohttp session (no-op if not active)."""
        if self._session_active:
            if self.crawler:
                await self.crawler.aclose()
            if self.session:
                await self.session.close()
            self._session_active = False
            logger.info("Optimized crawler session closed")
    
    def _build_crawler_config(self, config: Optional[CrawlerConfigBase] = None) -> Dict[str, Any]:
        """Translate a CrawlerConfigBase into crawl4ai ``arun`` keyword args.

        Returns the baseline config when *config* is None.
        """
        crawler_config = {
            "word_count_threshold": 10,
            "only_text": False,
            "bypass_cache": True,  # crawl4ai's own cache is bypassed; we use Redis instead
        }
        
        if not config:
            return crawler_config
        
        # JavaScript settings
        if config.javascript_enabled:
            crawler_config["js_code"] = []
            if config.wait_for:
                crawler_config["wait_for"] = config.wait_for
        
        # Request settings
        if config.headers:
            crawler_config["headers"] = config.headers
        
        if config.user_agent:
            crawler_config["user_agent"] = config.user_agent
        
        # Delay settings
        if config.delay:
            crawler_config["delay_before_return_html"] = config.delay
        
        return crawler_config
    
    def _extract_content(self, result, config: Optional[CrawlerConfigBase] = None) -> Dict[str, Any]:
        """Build a normalized result dict from a crawl4ai crawl result.

        Extraction of text/links/images/metadata/custom CSS data is each
        gated on the corresponding *config* flag. Extraction failures are
        logged and leave the corresponding field at its default.
        """
        extracted_data = {
            "url": result.url,
            "title": getattr(result, 'title', '') or '',
            "content": "",
            "metadata": {},
            "links": [],
            "images": [],
            "status_code": getattr(result, 'status_code', None),
            "success": result.success,
            "error_message": getattr(result, 'error_message', None)
        }
        
        if not result.success:
            return extracted_data
        
        # Extract text content
        if config and config.extract_text:
            extracted_data["content"] = result.cleaned_html or result.markdown or ""
        
        # Extract links
        if config and config.extract_links and hasattr(result, 'links'):
            try:
                if isinstance(result.links, dict):
                    # crawl4ai may group links into internal/external buckets
                    internal_links = result.links.get('internal', [])
                    external_links = result.links.get('external', [])
                    extracted_data["links"] = internal_links + external_links
                elif isinstance(result.links, list):
                    extracted_data["links"] = result.links
            except Exception as e:
                logger.warning(f"Error extracting links: {e}")
        
        # Extract images
        if config and config.extract_images and hasattr(result, 'media'):
            try:
                if isinstance(result.media, dict):
                    extracted_data["images"] = result.media.get('images', [])
                elif isinstance(result.media, list):
                    extracted_data["images"] = result.media
            except Exception as e:
                logger.warning(f"Error extracting images: {e}")
        
        # Extract metadata
        if config and config.extract_metadata:
            metadata = {}
            if hasattr(result, 'metadata') and result.metadata:
                metadata.update(result.metadata)
            
            # Add basic bookkeeping metadata computed from this extraction
            metadata.update({
                "content_length": len(extracted_data["content"]),
                "links_count": len(extracted_data["links"]),
                "images_count": len(extracted_data["images"]),
                "crawled_at": datetime.utcnow().isoformat()
            })
            extracted_data["metadata"] = metadata
        
        # Extract custom data via user-supplied CSS selectors
        if config and config.css_selectors and hasattr(result, 'html'):
            try:
                from bs4 import BeautifulSoup
                soup = BeautifulSoup(result.html, 'html.parser')
                custom_data = {}
                
                # Each selector key maps to the list of matched elements' text
                for key, selector in config.css_selectors.items():
                    elements = soup.select(selector)
                    if elements:
                        custom_data[key] = [elem.get_text(strip=True) for elem in elements]
                
                extracted_data["custom_data"] = custom_data
            except Exception as e:
                logger.warning(f"Error extracting custom data with CSS selectors: {e}")
        
        return extracted_data
    
    async def _take_screenshot(self, url: str) -> Optional[bytes]:
        """Take a screenshot of the page. Currently a stub returning None.

        A screenshot backend (e.g. playwright) could be integrated here.
        """
        try:
            logger.info(f"Screenshot functionality not implemented for {url}")
            return None
        except Exception as e:
            logger.warning(f"Error taking screenshot for {url}: {e}")
            return None
    
    async def _extract_structured_data(self, html: str) -> Dict[str, Any]:
        """Parse *html* and extract headings, lists, tables, and forms.

        Returns an empty dict on any parsing error (logged as a warning).
        """
        try:
            from bs4 import BeautifulSoup
            soup = BeautifulSoup(html, 'html.parser')
            
            structured_data = {
                "headings": {},
                "lists": [],
                "tables": [],
                "forms": [],
                "metadata": {}
            }
            
            # Headings h1..h6, keyed by tag name
            for i in range(1, 7):
                headings = soup.find_all(f'h{i}')
                if headings:
                    structured_data["headings"][f'h{i}'] = [h.get_text(strip=True) for h in headings]
            
            # Ordered and unordered lists, as lists of item texts
            lists = soup.find_all(['ul', 'ol'])
            for lst in lists:
                items = [li.get_text(strip=True) for li in lst.find_all('li')]
                if items:
                    structured_data["lists"].append(items)
            
            # Tables, as row-major lists of cell texts (td and th alike)
            tables = soup.find_all('table')
            for table in tables:
                rows = []
                for row in table.find_all('tr'):
                    cells = [cell.get_text(strip=True) for cell in row.find_all(['td', 'th'])]
                    if cells:
                        rows.append(cells)
                if rows:
                    structured_data["tables"].append(rows)
            
            # Forms: action, method, and the name/type/value of each input
            forms = soup.find_all('form')
            for form in forms:
                form_data = {
                    "action": form.get('action', ''),
                    "method": form.get('method', 'get'),
                    "inputs": []
                }
                inputs = form.find_all('input')
                for inp in inputs:
                    form_data["inputs"].append({
                        "name": inp.get('name', ''),
                        "type": inp.get('type', 'text'),
                        "value": inp.get('value', '')
                    })
                structured_data["forms"].append(form_data)
            
            return structured_data
        except Exception as e:
            logger.warning(f"Error extracting structured data: {e}")
            return {}
    
    async def _handle_file_content(self, url: str) -> Dict[str, Any]:
        """Handle file-type URLs (PDF, Word, etc.). Currently a stub.

        Recognizes .pdf/.doc/.docx by URL path suffix and returns a
        placeholder success dict; a real implementation would integrate
        a document parser (e.g. PyPDF2, python-docx).
        """
        try:
            # Check whether the URL path points at a supported file type
            parsed_url = urlparse(url)
            path = parsed_url.path.lower()
            
            if path.endswith(('.pdf', '.doc', '.docx')):
                logger.info(f"File content handling not implemented for {url}")
                return {
                    "url": url,
                    "content_type": "file",
                    "content": f"File content from {url}",
                    "success": True
                }
            else:
                return {"url": url, "success": False, "error_message": "Unsupported file type"}
        except Exception as e:
            logger.warning(f"Error handling file content for {url}: {e}")
            return {"url": url, "success": False, "error_message": str(e)}
    
    @retry_on_failure(max_retries=3, delay=1.0)
    async def crawl_single_page(
        self, 
        url: str, 
        config: Optional[CrawlerConfigBase] = None
    ) -> Dict[str, Any]:
        """Crawl a single page, with caching, rate limiting, and retries.

        Order of operations: Redis cache lookup, rate-limiter acquire,
        file-type short-circuit, crawl4ai fetch, content extraction,
        optional structured-data/screenshot enrichment, cache store.
        Never raises from the inner try: errors produce a failure dict
        (the @retry_on_failure decorator therefore only retries errors
        raised outside that try, e.g. during session start).

        Returns:
            A result dict with at least url/success/execution_time, plus
            the fields produced by _extract_content on success.
        """
        if not self._session_active:
            await self.start_session()
        
        start_time = time.time()
        
        try:
            # Check the cache first
            cache_key = f"crawler:{url}"
            cached_result = self.cache_manager.get(cache_key)
            if cached_result:
                logger.info(f"Cache hit for {url}")
                execution_time = time.time() - start_time
                result = json.loads(cached_result)
                result["execution_time"] = execution_time
                result["from_cache"] = True
                self.performance_tracker.record_request(execution_time, True, len(cached_result))
                return result
            
            # Acquire a rate-limiter slot
            await self.rate_limiter.acquire()
            
            # Short-circuit for file-type URLs
            parsed_url = urlparse(url)
            path = parsed_url.path.lower()
            if path.endswith(('.pdf', '.doc', '.docx')):
                result = await self._handle_file_content(url)
                execution_time = time.time() - start_time
                result["execution_time"] = execution_time
                self.performance_tracker.record_request(execution_time, result.get("success", False))
                return result
            
            # Build the crawl4ai configuration
            crawler_config = self._build_crawler_config(config)
            
            # Perform the crawl
            logger.info(f"Starting to crawl: {url}")
            result = await self.crawler.arun(url=url, **crawler_config)
            
            # Extract content from the raw result
            extracted_data = self._extract_content(result, config)
            
            # Enhanced structured-data extraction
            if config and config.extract_text and extracted_data.get("success", False):
                structured_data = await self._extract_structured_data(result.html)
                extracted_data["structured_data"] = structured_data
            
            # Page screenshot (if requested)
            if config and hasattr(config, 'take_screenshot') and config.take_screenshot:
                screenshot = await self._take_screenshot(url)
                if screenshot:
                    extracted_data["screenshot"] = screenshot
            
            execution_time = time.time() - start_time
            extracted_data["execution_time"] = execution_time
            extracted_data["from_cache"] = False
            
            # Cache successful results only
            if extracted_data.get("success", False):
                self.cache_manager.set(cache_key, json.dumps(extracted_data))
            
            self.performance_tracker.record_request(execution_time, extracted_data.get("success", False), 
                                                  len(extracted_data.get("content", "")))
            
            logger.info(f"Successfully crawled {url} in {execution_time:.2f}s")
            return extracted_data
            
        except Exception as e:
            execution_time = time.time() - start_time
            error_msg = f"Error crawling {url}: {str(e)}"
            logger.error(error_msg)
            
            # Return a failure dict shaped like a normal result
            result = {
                "url": url,
                "success": False,
                "error_message": error_msg,
                "execution_time": execution_time,
                "title": "",
                "content": "",
                "metadata": {},
                "links": [],
                "images": [],
                "from_cache": False
            }
            
            self.performance_tracker.record_request(execution_time, False)
            return result
    
    async def crawl_multiple_pages(
        self,
        urls: List[str],
        config: Optional[CrawlerConfigBase] = None
    ) -> List[Dict[str, Any]]:
        """Crawl multiple pages concurrently.

        Returns one result dict per URL, in the same order as *urls*;
        per-URL exceptions are converted into failure dicts.
        """
        if not self._session_active:
            await self.start_session()
        
        # Fan out concurrently with asyncio.gather
        tasks = [self.crawl_single_page(url, config) for url in urls]
        results = await asyncio.gather(*tasks, return_exceptions=True)
        
        # Convert exception results into failure dicts
        processed_results = []
        for i, result in enumerate(results):
            if isinstance(result, Exception):
                logger.error(f"Error crawling {urls[i]}: {result}")
                processed_results.append({
                    "url": urls[i],
                    "success": False,
                    "error_message": str(result),
                    "execution_time": 0,
                    "title": "",
                    "content": "",
                    "metadata": {},
                    "links": [],
                    "images": [],
                    "from_cache": False
                })
            else:
                processed_results.append(result)
        
        return processed_results
    
    async def crawl_with_depth(
        self,
        start_url: str,
        config: Optional[CrawlerConfigBase] = None,
        max_depth: int = 2,
        max_pages: int = 10
    ) -> List[Dict[str, Any]]:
        """Breadth-first crawl following links up to *max_depth* levels.

        Only same-domain links (relative paths or absolute http(s) URLs)
        are followed, subject to the config's include/exclude substring
        patterns. Stops after *max_pages* pages have been crawled.
        """
        if not self._session_active:
            await self.start_session()
        
        visited_urls = set()
        results = []
        urls_to_visit = [(start_url, 0)]  # (url, depth)
        
        while urls_to_visit and len(results) < max_pages:
            # FIFO pop gives breadth-first traversal order
            current_url, current_depth = urls_to_visit.pop(0)
            
            if current_url in visited_urls or current_depth > max_depth:
                continue
            
            visited_urls.add(current_url)
            
            # Crawl the current page
            page_result = await self.crawl_single_page(current_url, config)
            results.append(page_result)
            
            # Below max depth: enqueue the page's links for later visits
            if current_depth < max_depth and page_result.get("success", False):
                links = page_result.get("links", [])
                base_domain = urlparse(start_url).netloc
                
                for link in links:
                    try:
                        # Resolve relative links; skip anything that is
                        # neither root-relative nor absolute http(s)
                        if link.startswith('/'):
                            full_url = urljoin(current_url, link)
                        elif link.startswith('http'):
                            full_url = link
                        else:
                            continue
                        
                        # Apply include/exclude substring filters
                        if config and config.include_patterns:
                            if not any(pattern in full_url for pattern in config.include_patterns):
                                continue
                        
                        if config and config.exclude_patterns:
                            if any(pattern in full_url for pattern in config.exclude_patterns):
                                continue
                        
                        # Restrict the crawl to the start URL's domain
                        link_domain = urlparse(full_url).netloc
                        if link_domain == base_domain and full_url not in visited_urls:
                            urls_to_visit.append((full_url, current_depth + 1))
                            
                    except Exception as e:
                        logger.warning(f"Error processing link {link}: {e}")
        
        return results
    
    def get_performance_metrics(self) -> Dict[str, Any]:
        """Return aggregate crawl performance metrics."""
        return self.performance_tracker.get_metrics()
    
    def is_available(self) -> bool:
        """Report whether the crawl4ai dependency imported successfully."""
        return CRAWL4AI_AVAILABLE


# Global optimized crawler instance shared by importers of this module.
# NOTE(review): constructing this at import time builds a Redis client via
# CacheManager (redis.from_url) — presumably lazy until the first command,
# but verify no connection is attempted during import.
optimized_crawler_tool = OptimizedCrawlerTool()