import json
import random
import time

import pymysql
from selenium import webdriver
from selenium.common import NoSuchElementException
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC


def get_news_info(page=1):
    """Scrape one listing page of https://spa4.scrape.center and print each card.

    For every ``.el-card`` on the page, extracts the title, publish time and
    thumbnail image URL, prints them, and collects them into the return value.

    Args:
        page: 1-based listing page number to scrape (previously accepted but
            ignored — the function always loaded page 1; now honored).

    Returns:
        list[dict]: one dict per card with keys ``title``, ``publish_time``
        and ``img_url`` (``'null'`` when the card has no thumbnail).
    """
    # Path to the ChromeDriver binary
    chrome_driver_path = "/usr/local/bin/chromedriver"
    # Configure Chrome options
    options = Options()
    # Hide the navigator.webdriver automation flag to reduce bot detection
    options.add_argument("--disable-blink-features=AutomationControlled")
    # options.add_argument("--headless")  # enable headless mode if desired
    options.add_argument("--disable-gpu")  # disable GPU acceleration (needed in some setups)
    driver = webdriver.Chrome(service=Service(executable_path=chrome_driver_path), options=options)

    results = []
    try:
        # Bug fix: `page` was previously unused, so every call scraped page 1.
        # NOTE(review): assumes the SPA routes pages as /page/<n> — confirm
        # against the site; /page/1 is expected to equal the root listing.
        driver.get(f"https://spa4.scrape.center/page/{page}")

        # Wait until all el-card elements are present (up to 10 s)
        cards = WebDriverWait(driver, 10).until(
            EC.presence_of_all_elements_located((By.CSS_SELECTOR, '#index .el-card'))
        )

        # Iterate over each el-card element
        for card in cards:
            # Title text
            title = card.find_element(By.CSS_SELECTOR, 'h3 a').text
            # Publish time text
            publish_time = card.find_element(By.CSS_SELECTOR, 'p.info span').text
            # Thumbnail URL; some cards have no image, keep the original
            # 'null' string sentinel in that case
            try:
                img_url = card.find_element(By.CSS_SELECTOR, '.thumb img').get_attribute('src')
            except NoSuchElementException:
                img_url = 'null'
            results.append({
                'title': title,
                'publish_time': publish_time,
                'img_url': img_url,
            })
            # Print the extracted record
            print(f"Title: {title}")
            print(f"Publish Time: {publish_time}")
            print(f"Image URL: {img_url}")
            print("-" * 40)
    finally:
        # Always release the browser, even if the wait times out or a
        # selector fails — previously an exception leaked the Chrome process.
        driver.quit()

    return results

# Script entry point: scrape the first listing page when run directly.
if __name__ == '__main__':
    get_news_info(page=1)
