import requests
from bs4 import BeautifulSoup
import csv
# Scrape the Weibo hot-search board and append each entry (rank, title,
# heat score) as a row to a local CSV file.
url = 'https://s.weibo.com/top/summary?Refer=top_hot&topnav=1&wvr=6'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36',
    # NOTE(security): hard-coded session cookie — this is a credential.
    # It will expire and should be loaded from config/env, not committed.
    'Cookie': 'PC_TOKEN=56acebb003; SUB=_2AkMQZPDyf8NxqwFRmf4XzG7jbo9-zw_EieKmOAEpJRMxHRl-yT9kqmMBtRB6O-TeHZuxtT8UZqw1sWOhePlJmZYij8Kt; SUBP=0033WrSXqPxfM72-Ws9jqgMF55529P9D9WFn8kh0T0UU6726FLnSMwdd; XSRF-TOKEN=vJQRzCbKSfGH5GyielTrC6A8; WBPSESS=UpRpmiZuabnjxMaPsrZiV8mI2aSltpEjS1i-ewThzWbdWlxWt7mFXo_iKbz2bakTR5fSWhTsz6IKwa6ZqAHwWpB_GBOWh-xULcSw_FNbM3nh_6N261LJfmxbaUL4cTab'
}

# timeout prevents the script from hanging forever; raise_for_status turns
# HTTP errors (403 from an expired cookie, etc.) into a clear exception.
response = requests.get(url=url, headers=headers, timeout=10)
response.raise_for_status()
soup = BeautifulSoup(response.text, 'html.parser')

tbody = soup.find('tbody')
if tbody is None:
    # Page layout changed or we were served a login/captcha page.
    raise RuntimeError('no <tbody> found in response - page layout may have changed or the cookie expired')
trs = tbody.find_all('tr')

# Raw string: the path contains a backslash (was a pointless f-string).
with open(r'D:\热搜.csv', mode='a', encoding='utf-8', newline='') as f:
    csv_write = csv.DictWriter(f, fieldnames=['排序', '标题', '热度'])
    # Write the header only when the file is empty (mode='a' appends).
    if f.tell() == 0:
        csv_write.writeheader()
    # enumerate(..., start=1) fixes the original off-by-one: `num` was
    # incremented before the first write, so ranks started at 2.
    for rank, tr in enumerate(trs, start=1):
        title = tr.find('td', class_='td-02').a.text
        # The pinned/top row has no heat span; record an empty string
        # instead of crashing on `.text` of None.
        hot_span = tr.find('td', class_='td-03').find('span', class_='icon-txt')
        hot = hot_span.text if hot_span is not None else ''
        csv_write.writerow({
            '排序': rank,
            '标题': title,
            '热度': hot,
        })
print("爬虫完成")
