from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from bs4 import BeautifulSoup
import time
import random


def _scroll_page(driver, scroll_times):
    """Scroll the page *scroll_times* times to trigger lazy-loaded content.

    Each pass jumps to the bottom, waits a randomized interval to mimic a
    human reader, then re-scrolls gradually in thirds of the (possibly
    grown) page height.
    """
    for i in range(scroll_times):
        print(f"第 {i + 1} 次滑动...")

        # Jump straight to the bottom to kick off the lazy loader.
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")

        # Randomized 2-4s pause so the request pattern looks human.
        time.sleep(random.uniform(2, 4))

        # Gradual follow-up scroll (1/3 of the page height per step) —
        # some feeds only load when scrolling passes through the viewport.
        current_height = driver.execute_script("return document.body.scrollHeight")
        scroll_step = current_height // 3

        for step in range(3):
            scroll_position = scroll_step * (step + 1)
            driver.execute_script(f"window.scrollTo(0, {scroll_position});")
            time.sleep(random.uniform(0.5, 1.5))  # brief dwell between steps


def _parse_news_item(item, index):
    """Extract one news record from a `.channel-feed-item` element.

    Args:
        item: BeautifulSoup Tag for a single feed item.
        index: 1-based position of the item in the feed.

    Returns:
        Dict with keys ``index``, ``title``, ``link``, ``source``,
        ``time``, ``comments``, ``collections``. Missing fields fall
        back to placeholder strings ("无标题", "无链接", "未知来源",
        "无时间") or "0" for the counters.
    """
    # Title: try the known title classes in order, fall back to any <a>.
    title_element = (item.select_one('.question-title-text') or
                     item.select_one('.article-title-text') or
                     item.select_one('.question-title') or
                     item.select_one('a'))
    title = title_element.get_text(strip=True) if title_element else "无标题"

    # Link: resolve protocol-relative (//host/...) and site-relative
    # (/path) hrefs to absolute URLs. The // case must be checked first,
    # otherwise it would be wrongly prefixed with the site origin.
    link_element = item.select_one('a[href]')
    link = link_element.get('href') if link_element else "无链接"
    if link.startswith('//'):
        link = 'https:' + link
    elif link.startswith('/'):
        link = 'https://news.qq.com' + link

    # Source / publisher name.
    source_element = item.select_one('.media-name')
    source = source_element.get_text(strip=True) if source_element else "未知来源"

    # Publication time.
    time_element = item.select_one('.time')
    time_text = time_element.get_text(strip=True) if time_element else "无时间"

    # Bookmark count; the second selector is a looser fallback for
    # layout variants whose class merely contains "collect".
    collect_element = (item.select_one('.article-collect .interation-num') or
                       item.select_one('[class*="collect"]'))
    collect_num = collect_element.get_text(strip=True) if collect_element else "0"

    # Comment count.
    comment_element = item.select_one('.article-comment .interation-num')
    comment_num = comment_element.get_text(strip=True) if comment_element else "0"

    return {
        'index': index,
        'title': title,
        'link': link,
        'source': source,
        'time': time_text,
        'comments': comment_num,
        'collections': collect_num
    }


def getDataWithSelenium(url, scroll_times=3):
    """Scrape news items from *url* with a headless Chrome browser.

    Loads the page, scrolls ``scroll_times`` times so lazily-loaded feed
    items render, then parses the final DOM with BeautifulSoup.

    Args:
        url: Page to load (expected to contain `.channel-feed-item` nodes).
        scroll_times: Number of scroll passes to perform (default 3).

    Returns:
        List of per-item dicts (see ``_parse_news_item``); an empty list
        if any exception occurs. The browser is always closed.
    """
    # Headless Chrome configured for container-friendly operation.
    chrome_options = Options()
    chrome_options.add_argument("--headless")
    chrome_options.add_argument("--no-sandbox")
    chrome_options.add_argument("--disable-dev-shm-usage")
    # Spoof a desktop UA so the site serves the regular desktop markup.
    chrome_options.add_argument(
        "--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36")

    driver = webdriver.Chrome(options=chrome_options)

    try:
        driver.get(url)
        time.sleep(3)  # allow the initial render to settle

        print(f"开始模拟滑动，共滑动 {scroll_times} 次...")
        _scroll_page(driver, scroll_times)
        time.sleep(2)  # final settle before grabbing the DOM

        print("滑动完成，开始获取页面数据...")

        # Parse the fully rendered page.
        soup = BeautifulSoup(driver.page_source, 'html.parser')
        items = soup.select('.channel-feed-item')
        print(f'滑动后共找到 {len(items)} 条新闻')

        all_news = []
        for i, item in enumerate(items, 1):
            news_item = _parse_news_item(item, i)
            all_news.append(news_item)

            title = news_item['title']
            print(f'第{i}条 - 标题: {title[:50]}{"..." if len(title) > 50 else ""}')
            print(f'       来源: {news_item["source"]}')
            print(f'       时间: {news_item["time"]}')
            print(f'       评论: {news_item["comments"]} | 收藏: {news_item["collections"]}')
            print(f'       链接: {news_item["link"]}')
            print('------')

        return all_news

    except Exception as e:
        # Best-effort scraper: report and return an empty result rather
        # than propagating (original behavior preserved).
        print(f'发生错误: {e}')
        return []
    finally:
        driver.quit()


if __name__ == '__main__':
    import json

    target_url = "https://news.qq.com/"

    # Run the scraper with five scroll passes to load more feed items.
    collected = getDataWithSelenium(target_url, scroll_times=5)

    print(f"\n=== 总结 ===")
    print(f"总共获取到 {len(collected)} 条新闻")

    # Persist the scraped records as pretty-printed UTF-8 JSON.
    with open('news_data.json', 'w', encoding='utf-8') as fp:
        json.dump(collected, fp, ensure_ascii=False, indent=2)