import logging
import threading
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime
from typing import Any, Callable, Dict, List, Optional, Union

from anti_detection import AntiDetection
from config import Config
from data_storage import DataStorage
from http_crawler import HTTPCrawler
from proxy_manager import ProxyManager, ProxyRotator
from selenium_crawler import SeleniumCrawler

logger = logging.getLogger(__name__)

class CrawlerManager:
    """爬虫管理器 - 整合所有爬虫功能的主类"""
    
    def __init__(self, config_path: str = "config.json"):
        self.config = Config(config_path)
        self.storage = DataStorage(self.config)
        self.anti_detection = AntiDetection(self.config)
        
        # 初始化爬虫实例
        self.http_crawler = None
        self.selenium_crawler = None
        
        # 代理管理
        self.proxy_manager = None
        self.proxy_rotator = None
        
        # 统计信息
        self.stats = {
            'total_requests': 0,
            'successful_requests': 0,
            'failed_requests': 0,
            'start_time': None,
            'end_time': None
        }
        
        self._setup_proxy_manager()
        logger.info("CrawlerManager initialized")
    
    def _setup_proxy_manager(self):
        """设置代理管理器"""
        proxy_settings = self.config.get('proxy_settings', {})
        if proxy_settings.get('enabled', False):
            self.proxy_manager = ProxyManager(self.config)
            proxy_list = proxy_settings.get('proxy_list', [])
            if proxy_list:
                self.proxy_manager.load_proxies_from_list(proxy_list)
                self.proxy_manager.test_all_proxies()
                self.proxy_rotator = ProxyRotator(self.proxy_manager)
                logger.info("Proxy manager setup completed")
    
    def get_http_crawler(self) -> HTTPCrawler:
        """获取HTTP爬虫实例"""
        if not self.http_crawler:
            self.http_crawler = HTTPCrawler(self.config)
        return self.http_crawler
    
    def get_selenium_crawler(self) -> SeleniumCrawler:
        """获取Selenium爬虫实例"""
        if not self.selenium_crawler:
            self.selenium_crawler = SeleniumCrawler(self.config)
        return self.selenium_crawler
    
    def crawl_single_url(self, url: str, method: str = 'http', **kwargs) -> Optional[Dict[str, Any]]:
        """爬取单个URL"""
        self.stats['total_requests'] += 1
        start_time = time.time()
        
        try:
            content = None
            
            if method.lower() == 'http':
                crawler = self.get_http_crawler()
                headers = self.anti_detection.get_random_headers(
                    custom_headers=kwargs.get('headers', {})
                )
                content = crawler.get_page(url, headers=headers, **kwargs)
            
            elif method.lower() == 'selenium':
                crawler = self.get_selenium_crawler()
                content = crawler.get_page(url, **kwargs)
            
            else:
                raise ValueError(f"Unsupported crawler method: {method}")
            
            response_time = time.time() - start_time
            
            if content:
                self.stats['successful_requests'] += 1
                
                # 检查反爬虫检测信号
                detection_signals = self.anti_detection.check_detection_signals(content, 200)
                
                result = {
                    'url': url,
                    'content': content,
                    'method': method,
                    'response_time': response_time,
                    'timestamp': datetime.now().isoformat(),
                    'detection_signals': detection_signals
                }
                
                # 解析数据（如果提供了选择器）
                if 'selectors' in kwargs:
                    if method.lower() == 'http':
                        parsed_data = crawler.parse_page(content, kwargs['selectors'])
                        result['parsed_data'] = parsed_data
                
                # 自适应延时
                self.anti_detection.adaptive_delay(response_time, 200)
                
                return result
            else:
                self.stats['failed_requests'] += 1
                logger.error(f"Failed to crawl URL: {url}")
                return None
                
        except Exception as e:
            self.stats['failed_requests'] += 1
            logger.error(f"Error crawling URL {url}: {str(e)}")
            return None
    
    def crawl_multiple_urls(self, urls: List[str], method: str = 'http', 
                           max_workers: int = 5, save_results: bool = True, **kwargs) -> List[Dict[str, Any]]:
        """并发爬取多个URL"""
        self.stats['start_time'] = datetime.now()
        results = []
        
        logger.info(f"Starting to crawl {len(urls)} URLs with {max_workers} workers")
        
        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            future_to_url = {
                executor.submit(self.crawl_single_url, url, method, **kwargs): url
                for url in urls
            }
            
            for future in as_completed(future_to_url):
                url = future_to_url[future]
                try:
                    result = future.result()
                    if result:
                        results.append(result)
                        logger.info(f"Successfully crawled: {url}")
                    else:
                        logger.warning(f"Failed to crawl: {url}")
                except Exception as e:
                    logger.error(f"Error processing {url}: {str(e)}")
        
        self.stats['end_time'] = datetime.now()
        
        # 保存结果
        if save_results and results:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            self.storage.save_to_json(results, f"crawl_results_{timestamp}.json")
            logger.info(f"Saved {len(results)} results to file")
        
        return results
    
    def crawl_with_pagination(self, base_url: str, page_param: str = 'page', 
                             max_pages: int = 10, method: str = 'http', **kwargs) -> List[Dict[str, Any]]:
        """支持分页的爬取"""
        results = []
        
        for page in range(1, max_pages + 1):
            if '?' in base_url:
                url = f"{base_url}&{page_param}={page}"
            else:
                url = f"{base_url}?{page_param}={page}"
            
            logger.info(f"Crawling page {page}: {url}")
            
            result = self.crawl_single_url(url, method, **kwargs)
            if result:
                results.append(result)
                
                # 检查是否到达最后一页
                if 'next_page_selector' in kwargs:
                    content = result.get('content', '')
                    if method.lower() == 'http':
                        from bs4 import BeautifulSoup
                        soup = BeautifulSoup(content, 'html.parser')
                        next_page = soup.select_one(kwargs['next_page_selector'])
                        if not next_page:
                            logger.info(f"No more pages found at page {page}")
                            break
            else:
                logger.warning(f"Failed to crawl page {page}, stopping pagination")
                break
        
        return results
    
    def crawl_with_login(self, login_url: str, credentials: Dict[str, str], 
                        target_urls: List[str], method: str = 'selenium', **kwargs) -> List[Dict[str, Any]]:
        """需要登录的爬取"""
        if method.lower() != 'selenium':
            logger.error("Login crawling requires Selenium")
            return []
        
        crawler = self.get_selenium_crawler()
        
        try:
            # 登录过程
            logger.info(f"Attempting login at: {login_url}")
            login_page = crawler.get_page(login_url)
            
            if not login_page:
                logger.error("Failed to load login page")
                return []
            
            # 填写登录表单
            if 'login_form' in kwargs:
                form_data = {}
                for field, value in credentials.items():
                    if field in kwargs['login_form']:
                        form_data[kwargs['login_form'][field]] = value
                
                if crawler.fill_form(form_data):
                    # 点击登录按钮
                    if 'login_button' in kwargs:
                        if crawler.click_element(kwargs['login_button']):
                            logger.info("Login successful")
                            time.sleep(3)  # 等待登录完成
                        else:
                            logger.error("Failed to click login button")
                            return []
                    else:
                        logger.error("Login button selector not provided")
                        return []
                else:
                    logger.error("Failed to fill login form")
                    return []
            
            # 爬取目标页面
            results = []
            for url in target_urls:
                result = self.crawl_single_url(url, 'selenium', **kwargs)
                if result:
                    results.append(result)
            
            return results
            
        except Exception as e:
            logger.error(f"Login crawling failed: {str(e)}")
            return []
    
    def save_results(self, results: List[Dict[str, Any]], format: str = 'json', 
                    filename: Optional[str] = None) -> bool:
        """保存爬取结果"""
        if not results:
            logger.warning("No results to save")
            return False
        
        if not filename:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"crawl_results_{timestamp}.{format}"
        
        try:
            if format.lower() == 'json':
                return self.storage.save_to_json(results, filename)
            elif format.lower() == 'csv':
                return self.storage.save_to_csv(results, filename)
            elif format.lower() == 'excel':
                return self.storage.export_to_excel(results, filename)
            else:
                logger.error(f"Unsupported format: {format}")
                return False
        except Exception as e:
            logger.error(f"Failed to save results: {str(e)}")
            return False
    
    def get_statistics(self) -> Dict[str, Any]:
        """获取爬取统计信息"""
        stats = self.stats.copy()
        
        if stats['start_time'] and stats['end_time']:
            duration = stats['end_time'] - stats['start_time']
            stats['duration_seconds'] = duration.total_seconds()
            
            if stats['total_requests'] > 0:
                stats['success_rate'] = stats['successful_requests'] / stats['total_requests']
                stats['average_time_per_request'] = stats['duration_seconds'] / stats['total_requests']
        
        # 添加存储统计
        stats['storage_stats'] = self.storage.get_storage_stats()
        
        # 添加代理统计
        if self.proxy_manager:
            stats['proxy_stats'] = self.proxy_manager.get_proxy_stats()
        
        return stats
    
    def close(self):
        """关闭所有资源"""
        if self.http_crawler:
            self.http_crawler.close()
        
        if self.selenium_crawler:
            self.selenium_crawler.close()
        
        if self.storage:
            self.storage.close()
        
        logger.info("CrawlerManager closed")