"""网页内容抓取器实现

支持从网页中提取标题、正文内容和图片链接，
使用多种策略确保在不同网站结构下都能正常工作。
"""
from __future__ import annotations

import os
import re
import tempfile
import time
import urllib.parse
from dataclasses import dataclass, field
from typing import List, Optional, Dict, Any

import requests

try:
    from selenium import webdriver
    from selenium.webdriver.chrome.options import Options
    from selenium.webdriver.common.by import By
    from selenium.webdriver.support.ui import WebDriverWait
    from selenium.webdriver.support import expected_conditions as EC
    from selenium.common.exceptions import TimeoutException, WebDriverException
except ImportError as e:
    print(f"警告: 缺少selenium库 {e}，请安装: pip install selenium")
    webdriver = None

try:
    from bs4 import BeautifulSoup
except ImportError as e:
    print(f"警告: 缺少beautifulsoup4库 {e}，请安装: pip install beautifulsoup4")
    BeautifulSoup = None


def _bool(env_name: str, default: bool = False) -> bool:
    return os.getenv(env_name, str(default)).strip().lower() in {"1", "true", "yes", "on"}


def _env(name: str, default: str = "") -> str:
    return os.getenv(name, default)


@dataclass
class ScrapedContent:
    """Result of scraping a single web page.

    On failure the ``error`` field is set and the remaining fields hold
    empty placeholder values.
    """
    url: str                                # the URL that was scraped
    title: str                              # extracted page title
    content: str                            # extracted body text
    images: List[str]                       # image URLs (or local paths when downloaded)
    meta_description: Optional[str] = None  # <meta name="description"> / og:description
    author: Optional[str] = None
    publish_date: Optional[str] = None
    # Annotated Optional because None is accepted at construction time; it is
    # normalized to an empty list in __post_init__ (a bare mutable default is
    # not allowed on a dataclass field).
    tags: Optional[List[str]] = None
    error: Optional[str] = None             # set when scraping failed

    def __post_init__(self):
        # Normalize tags=None to a fresh list so callers can always iterate.
        if self.tags is None:
            self.tags = []


@dataclass
class ScraperConfig:
    """Scraper configuration, with defaults sourced from environment variables.

    Defaults use ``field(default_factory=...)`` so the environment is read
    when a config object is *instantiated*, not once at import time — env
    changes made after this module is imported are therefore honored, and
    tests can override variables per-instance.
    """
    headless: bool = field(default_factory=lambda: _bool("SCRAPER_HEADLESS", True))
    user_data_dir: Optional[str] = field(default_factory=lambda: _env("SCRAPER_USER_DATA_DIR") or None)
    wait_timeout: int = field(default_factory=lambda: int(_env("SCRAPER_WAIT_TIMEOUT", "15")))
    download_images: bool = field(default_factory=lambda: _bool("SCRAPER_DOWNLOAD_IMAGES", False))
    image_download_dir: Optional[str] = field(default_factory=lambda: _env("SCRAPER_IMAGE_DIR") or None)
    max_images: int = field(default_factory=lambda: int(_env("SCRAPER_MAX_IMAGES", "10")))
    # Minimum rendered width/height in pixels for an image to be kept.
    min_image_size: int = field(default_factory=lambda: int(_env("SCRAPER_MIN_IMAGE_SIZE", "200")))
    user_agent: str = field(default_factory=lambda: _env("SCRAPER_USER_AGENT", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"))


class WebScraper:
    """Web page content scraper.

    Combines Selenium (for JavaScript-rendered pages) with BeautifulSoup
    (for static HTML parsing) so that dynamically loaded content and a
    variety of site structures can all be handled.  Usable as a context
    manager; ``close()`` releases the browser and HTTP session.
    """

    def __init__(self, config: ScraperConfig | None = None):
        """Initialize with *config*, or environment-driven defaults."""
        self.config = config or ScraperConfig()
        self.driver = None  # Chrome WebDriver, created lazily by ensure_driver()
        self.session = requests.Session()  # plain HTTP session for image downloads
        self.session.headers.update({
            'User-Agent': self.config.user_agent
        })

    def _build_driver(self):
        """Build and return a Chrome WebDriver.

        Raises:
            RuntimeError: if selenium is not installed.
        """
        if webdriver is None:
            raise RuntimeError("selenium 未安装，请运行: pip install selenium")

        options = Options()
        if self.config.headless:
            options.add_argument("--headless=new")
        options.add_argument("--disable-gpu")
        options.add_argument("--no-sandbox")
        options.add_argument("--disable-dev-shm-usage")
        # Make the browser session harder to detect as automated.
        options.add_argument("--disable-blink-features=AutomationControlled")
        options.add_experimental_option("excludeSwitches", ["enable-automation"])
        options.add_experimental_option('useAutomationExtension', False)

        if self.config.user_data_dir:
            options.add_argument(f"--user-data-dir={self.config.user_data_dir}")

        options.add_argument("--window-size=1280,900")
        options.add_argument(f"--user-agent={self.config.user_agent}")

        driver = webdriver.Chrome(options=options)
        # Hide the navigator.webdriver flag commonly used for bot detection.
        driver.execute_script("Object.defineProperty(navigator, 'webdriver', {get: () => undefined})")
        return driver

    def ensure_driver(self):
        """Return the WebDriver, creating it on first use."""
        if self.driver is None:
            self.driver = self._build_driver()
        return self.driver

    def _extract_title(self, soup: Any, driver=None) -> str:
        """Extract the page title.

        Strategy order: specific h1 selectors > any long-enough h1 >
        og:title > <title> tag (suffix-stripped) > JavaScript heuristics.
        Returns the sentinel string "未找到标题" when nothing matches.
        """
        if soup is None:
            return "未找到标题"

        # 1. Site-specific h1 selectors, most specific first.
        h1_selectors = [
            'h1.title', 'h1.article-title', 'h1.post-title',
            'h1[class*="title"]', 'h1[class*="headline"]',
            '.article-header h1', '.post-header h1',
            'article h1', 'main h1', '.content h1'
        ]

        for selector in h1_selectors:
            try:
                element = soup.select_one(selector)
                if element and element.get_text(strip=True):
                    return element.get_text(strip=True)
            except Exception:
                # Ignore malformed selectors / parser quirks; try the next one.
                continue

        # 2. Any h1 whose text is long enough to look like a real headline.
        h1_tags = soup.find_all('h1')
        if h1_tags:
            for h1 in h1_tags:
                text = h1.get_text(strip=True)
                if text and len(text) > 10:
                    return text

        # 3. Open Graph title.
        og_title = soup.find('meta', property='og:title')
        if og_title and og_title.get('content'):
            return og_title['content'].strip()

        # 4. <title> tag, with a common " - Site Name" suffix removed.
        title_tag = soup.find('title')
        if title_tag and title_tag.get_text(strip=True):
            title = title_tag.get_text(strip=True)
            # The separator must be surrounded by whitespace so hyphenated or
            # underscored titles (e.g. "COVID-19 update") are not truncated at
            # the first dash inside a word.
            title = re.sub(r'\s+[-|_–—]\s+.*$', '', title)
            if title.strip():
                return title.strip()

        # 5. JavaScript heuristics via Selenium as a last resort.
        if driver:
            try:
                js_title = driver.execute_script("""
                    // 查找最可能的标题元素
                    const candidates = [];
                    
                    // H1 标签
                    document.querySelectorAll('h1').forEach(h1 => {
                        const text = h1.innerText.trim();
                        if (text.length > 10 && text.length < 200) {
                            candidates.push({text, score: 10, element: 'h1'});
                        }
                    });
                    
                    // 特定类名的元素
                    const titleSelectors = [
                        '[class*="title"]', '[class*="headline"]', 
                        '[class*="subject"]', '[class*="caption"]'
                    ];
                    
                    titleSelectors.forEach(sel => {
                        document.querySelectorAll(sel).forEach(el => {
                            const text = el.innerText.trim();
                            if (text.length > 10 && text.length < 200) {
                                candidates.push({text, score: 5, element: el.tagName});
                            }
                        });
                    });
                    
                    // 按分数排序，返回最佳候选
                    candidates.sort((a, b) => b.score - a.score);
                    return candidates.length > 0 ? candidates[0].text : '';
                """)
                if js_title:
                    return js_title
            except Exception:
                pass

        return "未找到标题"

    def _extract_content(self, soup: Any, driver=None) -> str:
        """Extract the main body text.

        Strategy order: known content-container selectors > JavaScript
        extraction via Selenium > concatenated <p> tags.  Returns the
        sentinel string "未找到正文内容" when nothing matches.
        """
        if soup is None:
            return "未找到正文内容"

        # 1. Known article/content container selectors.
        content_selectors = [
            'article', '.article-content', '.post-content', '.entry-content',
            '.article-body', '.post-body', '.content-body',
            'main .content', '.main-content', '#content',
            '[class*="article"][class*="content"]',
            '[class*="post"][class*="content"]'
        ]

        for selector in content_selectors:
            try:
                element = soup.select_one(selector)
                if element:
                    # Strip scripts, styles and page chrome before extracting text.
                    for tag in element.find_all(['script', 'style', 'nav', 'aside', 'footer', 'header']):
                        tag.decompose()

                    text = element.get_text(separator='\n', strip=True)
                    if text and len(text) > 100:  # require a substantial body
                        # Collapse redundant blank lines and runs of spaces.
                        text = re.sub(r'\n\s*\n', '\n\n', text)
                        text = re.sub(r' +', ' ', text)
                        return text
            except Exception:
                continue

        # 2. JavaScript extraction via Selenium.
        if driver:
            try:
                js_content = driver.execute_script("""
                    function extractContent() {
                        // 查找最可能的内容容器
                        const selectors = [
                            'article', '.article-content', '.post-content', 
                            '.entry-content', '.article-body', '.post-body',
                            'main', '.main-content', '#content'
                        ];
                        
                        for (const sel of selectors) {
                            const elements = document.querySelectorAll(sel);
                            for (const el of elements) {
                                // 移除干扰元素
                                const clone = el.cloneNode(true);
                                clone.querySelectorAll('script,style,nav,aside,footer,header,.sidebar,.ads').forEach(x => x.remove());
                                
                                const text = clone.innerText.trim();
                                if (text.length > 200) {
                                    return text;
                                }
                            }
                        }
                        
                        // 降级：查找段落密集区域
                        const paragraphs = Array.from(document.querySelectorAll('p'))
                            .map(p => p.innerText.trim())
                            .filter(text => text.length > 50);
                        
                        if (paragraphs.length > 2) {
                            return paragraphs.join('\\n\\n');
                        }
                        
                        return '';
                    }
                    
                    return extractContent();
                """)
                if js_content and len(js_content) > 100:
                    return js_content
            except Exception:
                pass

        # 3. Fallback: collect standalone paragraphs.
        paragraphs = []
        for p in soup.find_all('p'):
            text = p.get_text(strip=True)
            if text and len(text) > 20:
                paragraphs.append(text)

        if paragraphs:
            return '\n\n'.join(paragraphs[:10])  # cap at 10 paragraphs

        return "未找到正文内容"

    def _extract_images(self, soup: Any, base_url: str, driver=None) -> List[str]:
        """Extract image URLs from the page.

        Looks in content areas first, then (with Selenium) scrolls to
        trigger lazy loading and collects visible <img> and CSS background
        images.  Returns at most ``config.max_images`` URLs, or local file
        paths when ``config.download_images`` is enabled.
        """
        if soup is None:
            return []
        image_urls = set()

        # 1. Images inside content areas (fall back to the whole page).
        content_areas = soup.select('article, .article-content, .post-content, .entry-content, main')
        if not content_areas:
            content_areas = [soup]

        for area in content_areas:
            for img in area.find_all('img'):
                # data-src / data-original cover common lazy-loading schemes.
                src = img.get('src') or img.get('data-src') or img.get('data-original')
                if src:
                    # Resolve relative URLs against the page URL.
                    absolute_url = urllib.parse.urljoin(base_url, src)
                    if self._is_valid_image_url(absolute_url):
                        image_urls.add(absolute_url)

        # 2. Selenium pass for lazily loaded and background images.
        if driver:
            try:
                # Scroll to trigger lazy loading before inspecting the DOM.
                driver.execute_script("window.scrollTo(0, document.body.scrollHeight/2);")
                time.sleep(1)
                driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
                time.sleep(1)

                js_images = driver.execute_script("""
                    const images = new Set();
                    
                    // 查找所有可见图片
                    document.querySelectorAll('img').forEach(img => {
                        const rect = img.getBoundingClientRect();
                        if (rect.width > arguments[0] && rect.height > arguments[0]) {  // 最小尺寸过滤
                            const src = img.src || img.dataset.src || img.dataset.original;
                            if (src && src.startsWith('http')) {
                                images.add(src);
                            }
                        }
                    });
                    
                    // 查找背景图片
                    document.querySelectorAll('div, span, section').forEach(el => {
                        const style = window.getComputedStyle(el);
                        const bg = style.backgroundImage;
                        if (bg && bg.includes('url(')) {
                            const match = bg.match(/url\\(["']?([^"')]+)["']?\\)/);
                            if (match && match[1] && match[1].startsWith('http')) {
                                images.add(match[1]);
                            }
                        }
                    });
                    
                    return Array.from(images);
                """, self.config.min_image_size)

                for img_url in js_images:
                    if self._is_valid_image_url(img_url):
                        image_urls.add(img_url)
            except Exception:
                pass

        # 3. Cap the number of returned images.
        sorted_images = list(image_urls)[:self.config.max_images]

        # 4. Optionally download the images and return local paths instead.
        if self.config.download_images and self.config.image_download_dir:
            downloaded_images = []
            for img_url in sorted_images:
                local_path = self._download_image(img_url)
                if local_path:
                    downloaded_images.append(local_path)
            return downloaded_images

        return sorted_images

    def _is_valid_image_url(self, url: str) -> bool:
        """Return True when *url* looks like a useful content image.

        Rejects non-http(s) URLs and common decorative assets (icons,
        logos, buttons, SVGs, tracking pixels, loading spinners).
        """
        if not url or not url.startswith(('http://', 'https://')):
            return False

        # Patterns for decorative / non-content images.
        exclude_patterns = [
            r'\.gif$',  # usually decorative GIFs
            r'[/_]icon[s]?[/_]',  # icons
            r'[/_]logo[s]?[/_]',  # logos
            r'[/_]button[s]?[/_]',  # buttons
            r'\.svg$',  # SVG icons
            r'1x1\.png',  # tracking pixels
            r'loading\.gif',  # loading spinners
            r'spacer\.gif',  # layout spacers
        ]

        for pattern in exclude_patterns:
            if re.search(pattern, url, re.IGNORECASE):
                return False

        return True

    def _download_image(self, img_url: str) -> Optional[str]:
        """Download one image into ``config.image_download_dir``.

        Returns the local file path, or None on any failure (best effort —
        a failed download must not abort the whole scrape).
        """
        try:
            response = self.session.get(img_url, timeout=10, stream=True)
            response.raise_for_status()

            # Only keep responses that actually are images.
            content_type = response.headers.get('content-type', '')
            if not content_type.startswith('image/'):
                return None

            # Derive a filename from the URL path, falling back to a
            # timestamped name with an extension from the content type.
            filename = os.path.basename(urllib.parse.urlparse(img_url).path)
            if not filename or '.' not in filename:
                ext = content_type.split('/')[-1]
                filename = f"image_{int(time.time())}.{ext}"

            os.makedirs(self.config.image_download_dir, exist_ok=True)
            local_path = os.path.join(self.config.image_download_dir, filename)

            # Stream to disk in chunks to avoid holding the image in memory.
            with open(local_path, 'wb') as f:
                for chunk in response.iter_content(chunk_size=8192):
                    f.write(chunk)

            return local_path
        except Exception:
            return None

    def _extract_metadata(self, soup: Any) -> Dict[str, Any]:
        """Extract description, author, publish date and tags.

        Prefers <meta> / Open Graph tags, then falls back to heuristic
        CSS-class-based selectors on the page body.
        """
        if soup is None:
            return {}
        metadata = {}

        # Description: meta description or og:description.
        desc = soup.find('meta', attrs={'name': 'description'}) or soup.find('meta', property='og:description')
        if desc and desc.get('content'):
            metadata['description'] = desc['content'].strip()

        # Author: meta tags first, then byline-like elements.
        author = soup.find('meta', attrs={'name': 'author'}) or soup.find('meta', property='article:author')
        if author and author.get('content'):
            metadata['author'] = author['content'].strip()
        else:
            author_selectors = ['.author', '.byline', '[class*="author"]', '[class*="writer"]']
            for selector in author_selectors:
                author_el = soup.select_one(selector)
                if author_el:
                    metadata['author'] = author_el.get_text(strip=True)
                    break

        # Publish date: article:published_time meta, then time/date elements.
        pub_time = soup.find('meta', property='article:published_time')
        if pub_time and pub_time.get('content'):
            metadata['publish_date'] = pub_time['content'].strip()
        else:
            time_selectors = ['time', '.date', '.publish-time', '[class*="time"]', '[class*="date"]']
            for selector in time_selectors:
                time_el = soup.select_one(selector)
                if time_el:
                    # Prefer the machine-readable datetime attribute when present.
                    datetime_attr = time_el.get('datetime')
                    if datetime_attr:
                        metadata['publish_date'] = datetime_attr
                    else:
                        metadata['publish_date'] = time_el.get_text(strip=True)
                    break

        # Tags: article:tag metas, then tag/category-like elements.
        tags = []
        tag_metas = soup.find_all('meta', property='article:tag')
        for tag_meta in tag_metas:
            if tag_meta.get('content'):
                tags.append(tag_meta['content'].strip())

        if not tags:
            tag_selectors = ['.tags', '.tag', '[class*="tag"]', '[class*="category"]']
            for selector in tag_selectors:
                tag_elements = soup.select(selector)
                for tag_el in tag_elements:
                    tag_text = tag_el.get_text(strip=True)
                    if tag_text and len(tag_text) < 50:  # sane tag length
                        tags.append(tag_text)

        metadata['tags'] = tags[:10]  # cap at 10 tags

        return metadata

    def scrape_url(self, url: str) -> ScrapedContent:
        """Scrape title, body text, images and metadata from *url*.

        Never raises: every failure is reported through the ``error``
        field of the returned ScrapedContent.
        """
        try:
            # Fail fast with a clear message instead of an opaque
            # "'NoneType' object is not callable" further down.
            if BeautifulSoup is None:
                raise RuntimeError("beautifulsoup4 未安装，请运行: pip install beautifulsoup4")

            # 1. Load the page with Selenium.
            driver = self.ensure_driver()
            driver.get(url)

            # Wait until the document reports it is fully loaded.
            WebDriverWait(driver, self.config.wait_timeout).until(
                lambda d: d.execute_script("return document.readyState") == "complete"
            )

            # Give dynamically injected content a moment to render.
            time.sleep(2)

            # 2. Parse the rendered page source.
            page_source = driver.page_source
            soup = BeautifulSoup(page_source, 'html.parser')

            # 3. Extract the individual pieces.
            title = self._extract_title(soup, driver)
            content = self._extract_content(soup, driver)
            images = self._extract_images(soup, url, driver)
            metadata = self._extract_metadata(soup)

            return ScrapedContent(
                url=url,
                title=title,
                content=content,
                images=images,
                meta_description=metadata.get('description'),
                author=metadata.get('author'),
                publish_date=metadata.get('publish_date'),
                tags=metadata.get('tags', [])
            )

        except Exception as e:
            # The selenium exception classes only exist when the module-top
            # import succeeded; guard on ``webdriver`` so a missing selenium
            # install does not turn this handler into a NameError.
            if webdriver is not None and isinstance(e, TimeoutException):
                return ScrapedContent(
                    url=url, title="", content="", images=[],
                    error="页面加载超时"
                )
            if webdriver is not None and isinstance(e, WebDriverException):
                return ScrapedContent(
                    url=url, title="", content="", images=[],
                    error=f"WebDriver错误: {str(e)}"
                )
            return ScrapedContent(
                url=url, title="", content="", images=[],
                error=f"抓取失败: {str(e)}"
            )

    def scrape_multiple_urls(self, urls: List[str]) -> List[ScrapedContent]:
        """Scrape each URL in order, pausing 1s between requests."""
        results = []
        for url in urls:
            result = self.scrape_url(url)
            results.append(result)
            # Be polite: avoid hammering the target site.
            time.sleep(1)
        return results

    def close(self):
        """Release the WebDriver and HTTP session (idempotent)."""
        if self.driver:
            try:
                self.driver.quit()
            except Exception:
                # Best effort: the browser may already be gone.
                pass
            self.driver = None

        if hasattr(self, 'session'):
            self.session.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()


# 便捷函数
def scrape_webpage(url: str, download_images: bool = False, image_dir: Optional[str] = None) -> ScrapedContent:
    """Convenience function: scrape a single web page.

    When *download_images* is True the images are saved to *image_dir*;
    a fresh temporary directory is created when no directory is given.
    """
    cfg = ScraperConfig()
    if download_images:
        target_dir = image_dir or tempfile.mkdtemp(prefix="scraped_images_")
        cfg.download_images = True
        cfg.image_download_dir = target_dir

    scraper = WebScraper(cfg)
    try:
        return scraper.scrape_url(url)
    finally:
        scraper.close()


def scrape_webpages(urls: List[str], download_images: bool = False, image_dir: Optional[str] = None) -> List[ScrapedContent]:
    """Convenience function: scrape several web pages with one shared scraper.

    When *download_images* is True the images are saved to *image_dir*;
    a fresh temporary directory is created when no directory is given.
    """
    cfg = ScraperConfig()
    if download_images:
        target_dir = image_dir or tempfile.mkdtemp(prefix="scraped_images_")
        cfg.download_images = True
        cfg.image_download_dir = target_dir

    scraper = WebScraper(cfg)
    try:
        return scraper.scrape_multiple_urls(urls)
    finally:
        scraper.close()