import json

import requests
from bs4 import BeautifulSoup


class BaiduHot:
    """Scraper for Baidu's realtime hot-search board (top.baidu.com)."""

    def __init__(self):
        self.platform = "baidu"
        self.url = "https://top.baidu.com/board?tab=realtime"

    def fetch(self):
        """Fetch the current hot-search entries.

        Returns:
            list[dict]: up to 15 items, each with keys ``title``, ``hot``,
            ``desc``, ``url`` and ``cover``. Returns an empty list on any
            failure (non-200 response, missing payload, parse error).
        """
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36',
            'Referer': 'https://top.baidu.com/'
        }

        try:
            # Bug fix: added a timeout so a stalled connection cannot
            # hang the process indefinitely.
            resp = requests.get(self.url, headers=headers, timeout=10)
            if resp.status_code != 200:
                print(f"请求失败，状态码：{resp.status_code}")
                return []

            soup = BeautifulSoup(resp.text, 'html.parser')

            # The page embeds its data as JSON inside <script id="JSONData">.
            # NOTE(review): this selector may need adjusting if the page
            # layout changes — confirm against the live markup.
            script_data = soup.find('script', id='JSONData')
            if not script_data or not script_data.text.strip():
                print("未找到带有指定ID的<script>标签或标签内无内容")
                return []

            # Bug fix: `json` was used without being imported; the resulting
            # NameError was silently swallowed by the broad except below,
            # so fetch() always returned []. (Import added at file top.)
            data = json.loads(script_data.text)

            hotspots = []
            for item in data['data']['cards'][0]['content']:
                hotspots.append({
                    'title': item['word'],
                    'hot': item['hotScore'],
                    'desc': item.get('desc', ''),
                    'url': item['url'],
                    'cover': item.get('img', '')
                })
            return hotspots[:15]

        except Exception as e:
            # Top-level boundary: report the failure and degrade gracefully
            # to an empty result instead of crashing the caller.
            print(f"百度热搜抓取失败: {str(e)}")
            return []


def main():
    """CLI entry point: fetch the Baidu hot-search list and print a ranked summary."""
    results = BaiduHot().fetch()
    if not results:
        print("未能获取到百度热搜数据。")
        return
    print("百度热搜榜单：")
    for rank, entry in enumerate(results, start=1):
        print(f"{rank}. 标题: {entry['title']}, 热度: {entry['hot']}")


# Run the CLI entry point only when executed as a script, not on import.
if __name__ == "__main__":
    main()