# 3. Sites with anti-bot protection — Zhihu articles / Weibo real-time hot topics
import requests as rs

# Alternative target (Zhihu articles API, also anti-bot protected), kept for reference:
# url = 'https://www.zhihu.com/api/v4/members/crossin/articles?include=data%5B*%5D.comment_count%2Csuggest_edit%2Cis_normal%2Cthumbnail_extra_info%2Cthumbnail%2Ccan_comment%2Ccomment_permission%2Cadmin_closed_comment%2Ccontent%2Cvoteup_count%2Ccreated%2Cupdated%2Cupvoted_followees%2Cvoting%2Creview_info%2Cis_labeled%2Clabel_info%3Bdata%5B*%5D.author.badge%5B%3F(type%3Dbest_answerer)%5D.topics&offset=40&limit=20&sort_by=created'
url = 'https://weibo.com/a/hot/realtime'

# Dict with part of a real browser's request headers, used to impersonate a browser.
h = {
    'user-agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.72 Safari/537.36',
    'Host': 'weibo.com',
    # NOTE: this cookie is a placeholder, not a real one. Copy a real cookie
    # from the target page via the Chrome DevTools Network tab before running.
    'cookie':'................ SUB=_2AkMqaiMkf8NxqwJRmPAUyWvhbo12wgrEieKcNtL_JR2x;42d051b79679e;............',
    }

# headers= spoofs a browser to get past the site's anti-bot check.
# timeout= prevents the script from hanging forever if the server stalls
# (requests has NO default timeout).
req = rs.get(url, headers=h, timeout=10)
# Fail loudly on a 4xx/5xx (e.g. an anti-bot rejection) instead of silently
# saving the error page to hot.html.
req.raise_for_status()

print('req.encoding: ', req.encoding)
# If requests could not detect the page encoding, set it explicitly.
# This MUST happen before req.text is read — the original script assigned
# it after printing req.text, by which point it had no effect.
req.encoding = 'utf8'

d = req.content # raw response body as bytes
print('req.content: ', d)

# req.text decodes the same bytes to str using req.encoding
print('req.text', req.text)

with open('hot.html', 'wb') as f: # save the raw bytes to an HTML file
    f.write(d)