import os
from urllib.parse import urljoin

from bs4 import BeautifulSoup
import openpyxl
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager

# --- Browser setup ---------------------------------------------------------
# Launch Chrome with a visible window (no headless flag) so the operator can
# solve captchas by hand; webdriver_manager fetches a matching chromedriver.
chrome_options = webdriver.ChromeOptions()
driver = webdriver.Chrome(
    service=Service(ChromeDriverManager().install()),
    options=chrome_options,
)

# --- Output workbook -------------------------------------------------------
# One worksheet collects every scraped article; first row is the header.
wb = openpyxl.Workbook()
ws = wb.active
ws.append(["title", "content", "img_url", "original_url"])

base_url = "https://www.vaporfi.com/blog/?page="

# implicitly_wait() configures a session-wide element-lookup timeout; it is a
# one-time setting (not a sleep), so call it once here rather than once per
# article as the naive version did. driver.get() itself already blocks until
# the page load event fires.
driver.implicitly_wait(5)

# Walk every listing page (1..45 inclusive).
for page_num in range(1, 46):
    url = base_url + str(page_num)
    driver.get(url)

    # The site may present a captcha; block until the operator has solved it.
    input(f"请手动完成验证码验证，完成后按回车继续... (当前页: {page_num})")

    print(f"成功访问第{page_num}页")
    soup = BeautifulSoup(driver.page_source, 'html.parser')

    # Each article card links out via an <a class="image-wrap"> element.
    for link in soup.find_all('a', class_='image-wrap'):
        href = link.get('href')
        if not href:
            continue
        # Resolve possibly-relative hrefs against the listing-page URL so
        # driver.get() always receives an absolute URL.
        article_url = urljoin(url, href)
        print(f"正在访问文章链接: {article_url}")
        driver.get(article_url)

        article_soup = BeautifulSoup(driver.page_source, 'html.parser')

        # Title: perform the <h1> lookup once instead of twice.
        title_tag = article_soup.find('h1', class_='text-sm')
        title = title_tag.text.strip() if title_tag else 'No Title'

        # Article body text, newline-separated.
        content_div = article_soup.find('div', class_='_post-description clearfix')
        content = content_div.get_text(separator="\n", strip=True) if content_div else 'No Content'

        # First <img> on the page, if any — presumably the hero image;
        # TODO(review): confirm the first <img> is the intended one.
        img_tag = article_soup.find('img')
        img_url = img_tag['src'] if img_tag and img_tag.get('src') else None

        ws.append([title, content, img_url, article_url])
        print(f"成功保存文章: {title}")

    # Persist progress after every listing page so an interruption mid-run
    # (45 pages of manual captcha solving) does not lose everything.
    wb.save("articles.xlsx")

# Final flush of the workbook, then tear the browser down.
wb.save("articles.xlsx")
# Same message as before — the f-prefix was superfluous (no placeholders).
print("所有文章已成功保存至 articles.xlsx")

driver.quit()
