#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
简洁的网页爬虫类
支持网页数据抓取、截图、资源下载等功能
"""

import time
import os
import json
import requests
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import TimeoutException, NoSuchElementException
from urllib.parse import urljoin, urlparse


class WebScraper:
    """Chrome/Selenium web scraper.

    Supports page-info extraction, screenshots, image downloads, CSS-selector
    queries and JSON persistence. Can be used as a context manager so the
    browser is always closed::

        with WebScraper(headless=True) as scraper:
            scraper.navigate_to("https://example.com")
    """

    def __init__(self, headless=False, timeout=10):
        """
        Initialize the scraper and launch the Chrome driver.

        :param headless: run the browser in headless mode
        :param timeout: default explicit-wait timeout in seconds
        """
        self.driver = None
        self.wait = None
        self.timeout = timeout
        self.setup_driver(headless)

    def __enter__(self):
        """Context-manager entry: return the ready scraper."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context-manager exit: always close the browser; never suppress exceptions."""
        self.close()
        return False

    def setup_driver(self, headless):
        """
        Configure and start the Chrome WebDriver.

        :param headless: run the browser in headless mode
        :raises Exception: re-raises any driver start-up failure after logging it
        """
        chrome_options = Options()

        if headless:
            chrome_options.add_argument("--headless")

        # Stability/performance flags plus basic anti-bot-detection tweaks
        # (hide the automation switches, use a desktop user agent).
        chrome_options.add_argument("--no-sandbox")
        chrome_options.add_argument("--disable-dev-shm-usage")
        chrome_options.add_argument("--disable-gpu")
        chrome_options.add_argument("--disable-blink-features=AutomationControlled")
        chrome_options.add_argument("--window-size=1920,1080")
        chrome_options.add_experimental_option("excludeSwitches", ["enable-automation"])
        chrome_options.add_experimental_option('useAutomationExtension', False)
        chrome_options.add_argument("--user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36")

        try:
            print("🔄 初始化ChromeDriver...")
            self.driver = webdriver.Chrome(options=chrome_options)
            # Hide navigator.webdriver so simple JS-based bot checks pass.
            self.driver.execute_script("Object.defineProperty(navigator, 'webdriver', {get: () => undefined})")
            self.wait = WebDriverWait(self.driver, self.timeout)
            print("✅ ChromeDriver初始化成功")
        except Exception as e:
            print(f"❌ ChromeDriver初始化失败: {e}")
            print("请确保已正确安装ChromeDriver并添加到系统PATH中")
            raise

    def navigate_to(self, url, wait_time=3):
        """
        Navigate to the given URL.

        :param url: target URL
        :param wait_time: fixed sleep (seconds) after the page load starts
        :return: True if the page was loaded, False otherwise
        """
        try:
            print(f"🌐 正在访问: {url}")

            # Hard cap on page-load time so a slow site cannot hang us.
            self.driver.set_page_load_timeout(30)

            self.driver.get(url)

            # Fixed grace period for late-loading content.
            print("⏳ 等待页面加载...")
            time.sleep(wait_time)

            # Wait until a non-empty title appears; a timeout here is not fatal.
            try:
                self.wait.until(lambda driver: driver.title and len(driver.title.strip()) > 0)
            except TimeoutException:
                print("⚠️ 页面标题加载超时，继续执行...")

            title = self.driver.title
            current_url = self.driver.current_url

            print(f"📄 页面标题: {title}")
            print(f"📍 当前URL: {current_url}")

            return True

        except Exception as e:
            print(f"❌ 访问失败: {e}")
            return False

    def get_page_info(self, max_links=10, max_images=10, text_length=500):
        """
        Collect basic information about the current page.

        :param max_links: max number of link URLs to include
        :param max_images: max number of image URLs to include
        :param text_length: length of the body-text preview
        :return: dict with title/url/counts/links/images/text_preview/timestamp,
                 or None on failure
        """
        try:
            # Call get_attribute only once per element — each call is a
            # round trip to the browser.
            link_urls = []
            for link in self.driver.find_elements(By.TAG_NAME, "a"):
                href = link.get_attribute("href")
                if href:
                    link_urls.append(href)

            image_urls = []
            for img in self.driver.find_elements(By.TAG_NAME, "img"):
                src = img.get_attribute("src")
                if src:
                    image_urls.append(src)

            # A page without <body> yields an empty preview rather than failing.
            try:
                body_text = self.driver.find_element(By.TAG_NAME, "body").text
            except NoSuchElementException:
                body_text = ""

            page_info = {
                "title": self.driver.title,
                "url": self.driver.current_url,
                "links_count": len(link_urls),
                "images_count": len(image_urls),
                "links": link_urls[:max_links],
                "images": image_urls[:max_images],
                "text_preview": body_text[:text_length] + "..." if len(body_text) > text_length else body_text,
                "timestamp": time.strftime("%Y-%m-%d %H:%M:%S")
            }

            return page_info

        except Exception as e:
            print(f"❌ 获取页面信息失败: {e}")
            return None

    def take_screenshot(self, filename="screenshot.png", save_dir="downloads"):
        """
        Capture a screenshot of the current page.

        :param filename: screenshot file name
        :param save_dir: target directory (created if missing)
        :return: path of the saved screenshot, or None on failure
        """
        try:
            os.makedirs(save_dir, exist_ok=True)

            screenshot_path = os.path.join(save_dir, filename)
            self.driver.save_screenshot(screenshot_path)
            print(f"📸 截图已保存: {screenshot_path}")
            return screenshot_path
        except Exception as e:
            print(f"❌ 截图失败: {e}")
            return None

    def download_images(self, max_count=5, save_dir="downloads", timeout=10):
        """
        Download images from the current page over HTTP(S).

        Relative image URLs are resolved against the current page URL;
        non-HTTP schemes (e.g. ``data:`` URIs) are skipped.

        :param max_count: maximum number of images to download
        :param save_dir: target directory (created if missing)
        :param timeout: per-request timeout in seconds
        :return: list of file paths downloaded successfully
        """
        try:
            os.makedirs(save_dir, exist_ok=True)

            images = self.driver.find_elements(By.TAG_NAME, "img")
            downloaded_files = []

            for i, img in enumerate(images[:max_count]):
                try:
                    src = img.get_attribute("src")
                    if not src:
                        continue
                    # Resolve relative srcs (e.g. "/img/a.png") against the
                    # page URL so they are downloadable too.
                    src = urljoin(self.driver.current_url, src)
                    if not src.startswith(('http://', 'https://')):
                        continue

                    # Derive an extension from the URL path; default to .jpg.
                    ext = os.path.splitext(urlparse(src).path)[1] or '.jpg'
                    filename = f"image_{i+1}{ext}"
                    filepath = os.path.join(save_dir, filename)

                    response = requests.get(src, timeout=timeout, headers={
                        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36'
                    })

                    if response.status_code == 200:
                        with open(filepath, 'wb') as f:
                            f.write(response.content)
                        downloaded_files.append(filepath)
                        # Bug fix: the original printed a literal placeholder
                        # instead of the saved file path.
                        print(f"📥 图片下载成功: {filepath}")

                except Exception as e:
                    # One failed image must not abort the rest of the batch.
                    print(f"❌ 图片下载失败: {e}")

            return downloaded_files

        except Exception as e:
            print(f"❌ 下载过程失败: {e}")
            return []

    def find_elements_by_selector(self, selector, attribute=None):
        """
        Find elements by CSS selector.

        :param selector: CSS selector
        :param attribute: optional attribute name to extract from each match
        :return: list of elements, or list of non-empty attribute values when
                 *attribute* is given; [] on failure
        """
        try:
            elements = self.driver.find_elements(By.CSS_SELECTOR, selector)
            if attribute:
                return [elem.get_attribute(attribute) for elem in elements if elem.get_attribute(attribute)]
            return elements
        except Exception as e:
            print(f"❌ 查找元素失败: {e}")
            return []

    def wait_for_element(self, selector, timeout=None):
        """
        Wait for an element to be present in the DOM.

        :param selector: CSS selector
        :param timeout: wait timeout in seconds (defaults to self.timeout)
        :return: the element, or None on timeout/failure
        """
        try:
            wait_time = timeout or self.timeout
            element = WebDriverWait(self.driver, wait_time).until(
                EC.presence_of_element_located((By.CSS_SELECTOR, selector))
            )
            return element
        except TimeoutException:
            print(f"⚠️ 等待元素超时: {selector}")
            return None
        except Exception as e:
            print(f"❌ 等待元素失败: {e}")
            return None

    def click_element(self, selector, timeout=None):
        """
        Wait for an element and click it.

        :param selector: CSS selector
        :param timeout: wait timeout in seconds (defaults to self.timeout)
        :return: True if the element was found and clicked, False otherwise
        """
        try:
            element = self.wait_for_element(selector, timeout)
            if element:
                element.click()
                print(f"✅ 成功点击元素: {selector}")
                return True
            return False
        except Exception as e:
            print(f"❌ 点击元素失败: {e}")
            return False

    def get_text(self, selector):
        """
        Return the text of the first element matching *selector*.

        :param selector: CSS selector
        :return: element text, or "" when lookup fails
        """
        try:
            element = self.driver.find_element(By.CSS_SELECTOR, selector)
            return element.text
        except Exception:
            # Was a bare except; Exception keeps the "missing element ->
            # empty string" contract without trapping KeyboardInterrupt.
            return ""

    def save_data(self, data, filename="scraped_data.json", save_dir="downloads"):
        """
        Save data to a JSON file (UTF-8, pretty-printed).

        :param data: JSON-serializable data to save
        :param filename: output file name
        :param save_dir: target directory (created if missing)
        :return: path of the saved file, or None on failure
        """
        try:
            os.makedirs(save_dir, exist_ok=True)
            filepath = os.path.join(save_dir, filename)

            with open(filepath, 'w', encoding='utf-8') as f:
                json.dump(data, f, ensure_ascii=False, indent=2)

            print(f"💾 数据已保存: {filepath}")
            return filepath
        except Exception as e:
            print(f"❌ 数据保存失败: {e}")
            return None

    def close(self):
        """Quit the browser. Safe to call more than once."""
        if self.driver:
            self.driver.quit()
            self.driver = None  # make a second close() a harmless no-op
            print("🔒 浏览器已关闭")
