import os
import re
import requests
from bs4 import BeautifulSoup
import pandas as pd

# 爬取热搜榜单
# Scrape the Baidu realtime hot-search board
def scrape_hot_searches():
    """Scrape Baidu's realtime hot-search board and save it as CSV.

    Fetches the top-100 board at top.baidu.com, then for each entry follows
    the "查看更多" (view more) detail link and collects up to five related
    text snippets. Results are written to ``hot_search_data.csv`` (utf-8-sig
    so Excel opens it correctly). Entries whose detail page cannot be
    fetched are skipped, matching the original best-effort behavior.

    Returns:
        None. Side effect: writes ``hot_search_data.csv`` when the board
        page responds with HTTP 200; otherwise does nothing.
    """
    url = 'https://top.baidu.com/board?tab=realtime'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36'
    }
    # timeout prevents the script from hanging forever on a stalled server
    response = requests.get(url, headers=headers, timeout=10)

    if response.status_code != 200:
        # Original behavior: silently do nothing when the board page fails.
        return

    soup = BeautifulSoup(response.text, 'html.parser')
    hot_searches = soup.select('div.category-wrap_iQLoo > div')

    # Fixed row width: index + title + exactly NUM_CONTENTS content cells,
    # so the DataFrame construction below never sees ragged rows.
    NUM_CONTENTS = 5

    data = []
    index = 1
    for hot_search in hot_searches[:100]:
        hot_search_title_element = hot_search.select_one('div.c-single-text-ellipsis')
        if not hot_search_title_element:
            continue
        hot_search_title = hot_search_title_element.text.strip()

        # Locate the "查看更多" (view more) button for the detail page.
        detail_button = hot_search.select_one('div.hot-desc_1m_jR > a.look-more_3oNWC')
        if not detail_button:
            continue
        # NOTE(review): assumes href is absolute (Baidu currently emits full
        # URLs here); use urllib.parse.urljoin if relative links appear.
        detail_url = detail_button['href']

        try:
            detail_response = requests.get(detail_url, headers=headers, timeout=10)

            if detail_response.status_code == 200:
                detail_soup = BeautifulSoup(detail_response.text, 'html.parser')
                related_items = detail_soup.select('div.c-row')

                content_list = [
                    related_item.get_text(strip=True)
                    for related_item in related_items[:NUM_CONTENTS]
                ]
                # Pad short rows: fewer than NUM_CONTENTS related items would
                # otherwise make pd.DataFrame raise a column-count ValueError.
                content_list += [''] * (NUM_CONTENTS - len(content_list))

                data.append([index, hot_search_title] + content_list)
                print(f'热搜 {index}: {hot_search_title} 爬取完成')
                index += 1
        except Exception as e:
            # Best-effort: log and move on; include the error for diagnosis.
            print(f'热搜 {index}: {hot_search_title} 爬取失败: {e}')

    # Build the DataFrame and write it to CSV.
    columns = ['Index', 'Hot Search Title'] + [f'Content{i+1}' for i in range(NUM_CONTENTS)]
    df = pd.DataFrame(data, columns=columns)
    # utf-8-sig adds a BOM so Excel detects the encoding of Chinese text.
    df.to_csv('hot_search_data.csv', index=False, encoding='utf-8-sig')
    print('数据已保存到 hot_search_data.csv 文件')

# Run the scraper only when executed as a script, not on import.
if __name__ == '__main__':
    scrape_hot_searches()
