import time
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

# Target URL: Sogou WeChat article search for "NBA", results page 1.
url = 'https://weixin.sogou.com/weixin?query=NBA&_sug_type_=&s_from=input&_sug_=y&type=2&page=1&ie=utf8'

# Request headers that imitate a desktop browser; Sogou blocks the
# default requests User-Agent.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'
}

try:
    # Send the GET request. A timeout is essential: without it a stalled
    # connection would hang the script indefinitely.
    response = requests.get(url, headers=headers, timeout=10)
    response.raise_for_status()  # raise HTTPError for 4xx/5xx responses

    # Parse the returned HTML.
    soup = BeautifulSoup(response.text, 'html.parser')

    # Each search result is an <li> element inside the .news-list container.
    lis = soup.select('.news-list li')

    # Print the article link of every result.
    for li in lis:
        tag_a = li.select_one('.txt-box a')
        if tag_a is None:
            # Some list items (ads, separators) have no title link — skip them
            # instead of crashing on tag_a.get(...).
            continue
        href = tag_a.get('href')
        if not href:
            continue
        # urljoin correctly handles both relative hrefs ('/link?...') and
        # absolute ones; plain string concatenation produced a malformed
        # 'https://weixin.sogou.com//link?...' URL. Use a new name so the
        # module-level `url` constant is not clobbered inside the loop.
        article_url = urljoin('https://weixin.sogou.com/', href)
        print(href)
        # NOTE(review): article_url points at a Sogou redirect page that is
        # protected by anti-bot checks; fetching the real article content
        # requires a driven browser (Selenium is already imported above).

except requests.RequestException as e:
    # Network-level or HTTP-status failures from requests.
    print(f"请求失败: {e}")
except Exception as e:
    # Last-resort guard so the script reports rather than tracebacks.
    print(f"发生错误: {e}")
