import requests
import bs4
import re
import urllib

# Request headers for the news-list endpoint.
# NOTE(review): the Cookie below is a hard-coded, captured Weibo session —
# it will expire and is account-specific; presumably copied from a browser
# session. Verify whether the qq.com endpoint actually requires it.
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_1_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.96 Safari/537.36 Edg/88.0.705.53',
    'Cookie': 'SINAGLOBAL=18066498725.752834.1611537786966; login_sid_t=0297fcbfcb9b8c877699af368703a4a9; cross_origin_proto=SSL; _s_tentry=passport.weibo.com; Apache=7322444667209.109.1612003019387; ULV=1612003019394:3:3:3:7322444667209.109.1612003019387:1611664480082; WBStorage=8daec78e6a891122|undefined; WBtopGlobal_register_version=2021013018; crossidccode=CODE-yf-1L5NDb-45CE7F-dsBzTlzuJnfUzJUbd9269; SSOLoginState=1612003053; SUB=_2A25NEUa9DeRhGeBM4lYW-SjNyzqIHXVu-mr1rDV8PUJbkNANLXekkW1NRLuaHZjERnHtp_F0ofSXEqc03GX0senk; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9W5.021gAC84ZGSR2xVygvfs5NHD95Qceo.XS0.ceK5cWs4Dqcj7i--4iK.fi-isi--4i-20iKy8i--fiKnNi-27i--Xi-zRiKnNi--4iKnNiKyheo2E; wvr=6; UOR=,,graph.qq.com; webim_unReadCount=%7B%22time%22%3A1612003064568%2C%22dm_pub_total%22%3A1%2C%22chat_group_client%22%3A0%2C%22chat_group_notice%22%3A0%2C%22allcountNum%22%3A43%2C%22msgbox%22%3A0%7D',
    'Referer': 'https://d.weibo.com/' # key header for getting past the site's anti-scraping check
}
# API endpoint for the news list. To crawl more data, construct further
# requests by varying the `offset`/`limit` query parameters.
url = "https://i.news.qq.com/trpc.qqnews_web.kv_srv.kv_srv_http_proxy/list?sub_srv_id=antip&srv_id=pc&offset=0&limit=20&strategy=1&ext={%22pool%22:[%22high%22,%22top%22],%22is_filter%22:10,%22check_type%22:true}"

# Fetch the news-list endpoint and extract titles and links from the response body.
# timeout prevents the request from hanging indefinitely on a dead connection.
response = requests.get(url=url, headers=headers, timeout=10)
response.raise_for_status()  # fail fast on HTTP errors instead of scraping an error page

# NOTE(review): the endpoint appears to return JSON; regex extraction works on
# the raw text but json.loads() would be more robust if the schema is stable —
# confirm the payload structure before switching.
title_pattern = re.compile(r',"title":"(.*?)"', re.S)
link_pattern = re.compile(r',"url":"(.*?)"', re.S)
titles = title_pattern.findall(response.text)
links = link_pattern.findall(response.text)

# Keys are user-facing Chinese labels ("标题" = title, "链接" = link);
# kept byte-identical so downstream consumers of the printed dict still match.
data = {
    "标题": titles,
    "链接": links
}
print(data)
