#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
网站内容收集器核心模块
整合浏览器自动化和内容解析功能
"""

import time
import logging
from typing import List, Dict, Any, Optional
from urllib.parse import urljoin, urlparse

from .browser_manager import BrowserManager
from .content_parser import ContentParser
from .config_manager import ConfigManager

class WebCollector:
    """Website content collector.

    Orchestrates browser automation (navigation, optional login, scroll
    loading) through BrowserManager and delegates HTML parsing to
    ContentParser, driven by per-site configuration dictionaries obtained
    from ConfigManager.
    """

    def __init__(self) -> None:
        self.config_manager = ConfigManager()    # browser / per-site settings
        self.content_parser = ContentParser()    # HTML -> structured items
        self.logger = logging.getLogger(__name__)

    def collect_site(self, site_config: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Collect content items from a single website.

        Args:
            site_config: Site configuration; must contain 'url'. Optional
                keys: 'name', 'login_required', 'scroll_to_load',
                'max_scroll_times', 'extract_detail', 'max_detail_pages'.

        Returns:
            List of parsed content items; an empty list on any failure.
        """
        site_name = site_config.get('name', '未知网站')
        site_url = site_config.get('url', '')

        if not site_url:
            self.logger.error("%s: 缺少URL配置", site_name)
            return []

        self.logger.info("开始收集 %s 的内容", site_name)

        browser_config = self.config_manager.get_browser_config()

        # NOTE: the try wraps the whole `with` so that exceptions raised by
        # BrowserManager.__enter__/__exit__ are also caught and reported
        # (previously the try sat inside the context and let those escape).
        try:
            with BrowserManager(browser_config) as browser:
                # Start the browser session
                if not browser.start_browser(use_existing_profile=True):
                    self.logger.error("%s: 浏览器启动失败", site_name)
                    return []

                # Navigate to the target site
                if not browser.navigate_to(site_url):
                    self.logger.error("%s: 导航失败", site_name)
                    return []

                # Optional login; failure is non-fatal — we still try to
                # collect whatever is accessible without authentication.
                if site_config.get('login_required', False):
                    if not browser.login(site_config):
                        self.logger.warning("%s: 登录失败，尝试继续收集", site_name)

                # Give dynamic content a moment to render
                time.sleep(3)

                # Scroll to trigger lazy-loaded content when configured
                if site_config.get('scroll_to_load', False):
                    max_scrolls = site_config.get('max_scroll_times', 3)
                    browser.scroll_to_load_content(max_scrolls)

                # Grab the rendered page source
                html = browser.get_page_source()
                if not html:
                    self.logger.error("%s: 获取页面源码失败", site_name)
                    return []

                # Parse listing-page content
                content_items = self.content_parser.parse_content(html, site_config, site_url)

                # Optionally enrich items by visiting their detail pages
                if site_config.get('extract_detail', False) and content_items:
                    content_items = self._extract_detail_content(browser, content_items, site_config)

                self.logger.info("%s: 成功收集到 %d 条内容", site_name, len(content_items))
                return content_items

        except Exception:
            # logger.exception records the traceback, unlike the previous
            # logger.error(f"... {e}") which lost it.
            self.logger.exception("%s: 收集过程中出错", site_name)
            return []

    def _extract_detail_content(self, browser: "BrowserManager",
                                items: List[Dict[str, Any]],
                                site_config: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Visit detail pages for the first N items and merge their content.

        Only the first 'max_detail_pages' items (default 5) are fetched to
        keep run time bounded; the remaining items are returned unchanged.
        (Bug fix: the previous implementation silently dropped every item
        beyond the limit from the returned list.)

        Args:
            browser: An already-started browser session.
            items: Items parsed from the listing page; each may carry 'link'.
            site_config: Site configuration ('max_detail_pages').

        Returns:
            The full item list, with the visited items enriched in place.
        """
        detailed_items: List[Dict[str, Any]] = []
        max_details = site_config.get('max_detail_pages', 5)  # bound detail fetches

        for item in items[:max_details]:
            link = item.get('link', '')
            if not link:
                # Nothing to visit — keep the item as-is
                detailed_items.append(item)
                continue

            try:
                self.logger.info("获取详情页: %s", link)

                if browser.navigate_to(link):
                    time.sleep(2)  # wait for the detail page to render

                    detail_html = browser.get_page_source()
                    if detail_html:
                        detail_content = self.content_parser.extract_article_content(detail_html, link)

                        # Prefer the detail page's full body text; only fill
                        # in the title when the listing did not provide one.
                        if detail_content.get('content'):
                            item['content'] = detail_content['content']
                        if detail_content.get('title') and not item.get('title'):
                            item['title'] = detail_content['title']

                detailed_items.append(item)

                # Rate-limit successive requests
                time.sleep(1)

            except Exception:
                self.logger.exception("获取详情页失败 %s", link)
                detailed_items.append(item)  # keep the original, unenriched item

        # Keep items beyond the detail limit instead of discarding them.
        detailed_items.extend(items[max_details:])
        return detailed_items

    def collect_multiple_sites(self, site_names: List[str]) -> Dict[str, List[Dict[str, Any]]]:
        """Collect content for several sites by name.

        Args:
            site_names: Names of sites to look up via ConfigManager.

        Returns:
            Mapping of site name -> collected items ([] when the site has
            no configuration or collection failed).
        """
        results: Dict[str, List[Dict[str, Any]]] = {}

        for site_name in site_names:
            site_config = self.config_manager.get_site_config(site_name)
            if site_config:
                results[site_name] = self.collect_site(site_config)
            else:
                self.logger.warning("找不到网站配置: %s", site_name)
                results[site_name] = []

        return results

    def test_site_config(self, site_config: Dict[str, Any]) -> Dict[str, Any]:
        """Run a live collection to smoke-test a site configuration.

        Args:
            site_config: Site configuration to test.

        Returns:
            Dict with keys 'site_name', 'success', 'error',
            'content_count' and 'sample_content' (first 3 items).
        """
        site_name = site_config.get('name', '测试网站')
        result = {
            'site_name': site_name,
            'success': False,
            'error': '',
            'content_count': 0,
            'sample_content': [],
        }

        try:
            content = self.collect_site(site_config)

            result['success'] = bool(content)
            result['content_count'] = len(content)
            result['sample_content'] = content[:3]  # first 3 items as a sample

            if not content:
                result['error'] = '没有收集到任何内容，请检查选择器配置'

        except Exception as e:
            result['error'] = str(e)

        return result

    def validate_site_config(self, site_config: Dict[str, Any]) -> Dict[str, Any]:
        """Statically validate a site configuration (no network access).

        Args:
            site_config: Site configuration to validate.

        Returns:
            Dict with 'valid' (bool), 'errors' (blocking problems) and
            'warnings' (likely misconfigurations that are non-fatal).
        """
        validation_result: Dict[str, Any] = {
            'valid': True,
            'errors': [],
            'warnings': [],
        }

        # Required fields must be present and non-empty
        for field in ('name', 'url'):
            if not site_config.get(field):
                validation_result['errors'].append(f"缺少必需字段: {field}")
                validation_result['valid'] = False

        # A usable URL needs both a scheme and a network location
        url = site_config.get('url', '')
        if url:
            parsed = urlparse(url)
            if not parsed.scheme or not parsed.netloc:
                validation_result['errors'].append("URL格式无效")
                validation_result['valid'] = False

        # Login selectors are advisory — collection may still partially work
        if site_config.get('login_required', False):
            for field in ('username_selector', 'password_selector'):
                if not site_config.get(field):
                    validation_result['warnings'].append(f"登录配置缺少字段: {field}")

        # Content selectors: warn on missing config or missing title selector
        content_selectors = site_config.get('content_selectors', {})
        if not content_selectors:
            validation_result['warnings'].append("没有配置内容选择器")
        elif not content_selectors.get('title'):
            validation_result['warnings'].append("没有配置标题选择器")

        return validation_result