from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
import time


class HierarchicalScraper:
    """Breadth-first web scraper that follows links layer by layer.

    Drives a Chrome browser via Selenium: visits the start URL, records
    the page title and every absolute link on the page, then repeats the
    process for each discovered link, up to ``max_layers`` levels deep.
    """

    def __init__(self, driver_path, start_url, max_layers=3):
        """
        Initialize the scraper.

        :param driver_path: path to the chromedriver executable
        :param start_url: URL the crawl starts from (layer 1)
        :param max_layers: maximum crawl depth, inclusive
        """
        self.driver_path = driver_path
        self.start_url = start_url
        self.max_layers = max_layers
        self.driver = None  # created lazily by start_browser()

    def start_browser(self):
        """Launch a Chrome browser controlled by the configured driver."""
        service = Service(self.driver_path)
        self.driver = webdriver.Chrome(service=service)

    def stop_browser(self):
        """Quit the browser if it is running; safe to call repeatedly."""
        if self.driver:
            self.driver.quit()
            # Fix: clear the handle so a second call (or a later
            # start_browser/stop_browser cycle) cannot quit a dead session.
            self.driver = None

    def get_page_title_and_links(self, url, layer):
        """
        Visit *url* and return its title plus all absolute child links.

        :param url: page URL to load
        :param layer: current crawl depth (used for logging only)
        :return: tuple ``(title, urls)`` where ``urls`` is the list of
                 ``http(s)`` hrefs found in ``<a>`` tags on the page
        """
        print(f"正在爬取第 {layer} 层, URL: {url}")

        self.driver.get(url)
        time.sleep(5)  # crude page-load wait; WebDriverWait would be more robust

        title = self.driver.title
        print(f"第 {layer} 层标题: {title}")

        # Fix: call get_attribute('href') once per element — the original
        # issued up to three driver round-trips per link.
        urls = []
        for link in self.driver.find_elements(By.TAG_NAME, "a"):
            href = link.get_attribute('href')
            if href and href.startswith('http'):
                urls.append(href)

        return title, urls

    def scrape_hierarchy(self):
        """
        Crawl breadth-first, at most ``max_layers`` layers deep.

        Fix: each URL is visited at most once. The original re-crawled
        duplicate links, which made the frontier grow explosively (and
        repeatedly fetched common links such as the site's own header).

        :return: dict mapping layer number -> list of ``(url, title)`` pairs
        """
        self.start_browser()

        layers = {1: [self.start_url]}  # frontier of URLs per layer
        titles = {1: []}                # (url, title) results per layer
        visited = set()                 # URLs already crawled

        try:
            for layer in range(1, self.max_layers + 1):
                if not layers[layer]:
                    break
                next_layer_urls = []
                titles[layer] = []

                for current_url in layers[layer]:
                    if current_url in visited:
                        continue  # skip duplicates within/across layers
                    visited.add(current_url)
                    title, links = self.get_page_title_and_links(current_url, layer)
                    titles[layer].append((current_url, title))
                    if layer < self.max_layers:
                        # Queue only links we have not crawled yet.
                        next_layer_urls.extend(
                            u for u in links if u not in visited)

                if next_layer_urls:
                    layers[layer + 1] = next_layer_urls
                else:
                    break  # nothing left to crawl
        finally:
            self.stop_browser()  # always release the browser, even on error

        return titles


# Usage example
if __name__ == "__main__":
    # BUG FIX: the original pointed at the Chrome *browser* binary
    # ("/Applications/Google Chrome.app/Contents/MacOS/Google Chrome").
    # Selenium's Service() needs the path to the *chromedriver* executable,
    # so the original configuration could never start a session.
    driver_path = "/usr/local/bin/chromedriver"  # adjust to your chromedriver install
    start_url = "https://www.baidu.com/"

    # Build the scraper with the driver path, start URL and maximum depth.
    scraper = HierarchicalScraper(driver_path, start_url, max_layers=3)

    # Run the crawl; returns {layer: [(url, title), ...]}.
    hierarchy_data = scraper.scrape_hierarchy()

    # Print the results layer by layer.
    for layer, data in hierarchy_data.items():
        print(f"第 {layer} 层:")
        for url, title in data:
            print(f"URL: {url}, 标题: {title}")