'''
Demonstrate the effect of the Cookie request header:
fetch a login-only Gitee page with and without a cookie.

GET request:  parameters are carried in the URL, after the question mark
POST request: parameters are carried in the request body
              (form data)
'''
import requests
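
# A minimal sketch of the GET-vs-POST difference described in the docstring.
# httpbin.org is assumed here purely as an echo service for illustration.
r_get = requests.get('https://httpbin.org/get', params={'q': 'hello'})
print(r_get.url)            # the parameter shows up in the URL, after the ?
r_post = requests.post('https://httpbin.org/post', data={'q': 'hello'})
print(r_post.request.body)  # the parameter travels in the request body: q=hello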

# Open a page that requires login and copy its URL: the list of all my projects on Gitee
url = "https://gitee.com/zhou-poetry-and-painting/projects"
# Without a cookie, the fetched page renders as the logged-out view.
# Add a User-Agent request header to mimic a real browser.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'
}
r1 = requests.get(url, headers=headers)
# Save the fetched page to a file
with open('no_cookie.html','w',encoding='utf-8') as f:
    f.write(r1.content.decode())
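
# Quick sanity check on the anonymous request; the status code should be 200
# even though the page renders the logged-out view.
print('no cookie:', r1.status_code, len(r1.content), 'bytes')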

# With a cookie added (copied from the request headers after logging in via the browser), the result differs.
headers2 = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36',
    'Cookie': 'user_locale=zh-CN; oschina_new_user=false; tz=Asia%2FShanghai; sajssdk_2015_cross_new_user=1; Hm_lvt_24f17767262929947cc3631f99bfd274=1606370526; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%228082191%22%2C%22first_id%22%3A%22176032503a7943-0dfecf28e3db0b-930346c-2073600-176032503a8c36%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%2C%22%24latest_referrer%22%3A%22%22%7D%2C%22%24device_id%22%3A%22176032503a7943-0dfecf28e3db0b-930346c-2073600-176032503a8c36%22%7D; remote_way=http; Hm_lpvt_24f17767262929947cc3631f99bfd274=1606370759; gitee-session-n=Y09zanc0Z1lYN3BuZXptMG1QWit3WjNDdVhSdHgrZUlqbDljeUd4S2tkK3RObHlOU21SYWRWV0NrWGtzM3FBZE9na016eGZQcVIrTFlLY2ttNXI3blI5SFk3WlYzQVBlWmpUKzRUUVNhNWpkYTIxS1BySUFxTzdIa0lCdmdSa203YWNkODRRYkNqblB3UUtSWU0xeWF0UnlSVGV3c3lFbWV2RTdHTmY4T3R6OERzNmF4KzN4MTgyaUNWNnJraDVEdnBjVU1YUUNZV1IrSU9OdG42Z1ZWZUZVMWtKenpWZ2pHNjNuWXI2bDAxVnlMMDhFS2hPOWRwM0NZbm5Gb3VYVmV1WTRHaFRONHNaUCs5cVhUM0FmOTQ1QkJsSTJkNysxSGZaU0NKOFFCQ1gxNktMbWxNNmJ3djBpSFp6d0E4amJWeTV2WkdocHFyK0NEeHR1b1UvV3B2TGU4OWZTS3NRMW1FZkVMTUU1cGtnPS0tRTdJS0lzT1hBMERmeE1yUks1dXlFZz09--dc33efc613cd74687f511b2dc8fe21b20df43e72'
}
r2 = requests.get(url, headers=headers2)
# Save the fetched page to a file (named to pair with no_cookie.html)
with open('has_cookie.html', 'w', encoding='utf-8') as f:
    f.write(r2.content.decode())
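
# A crude way to confirm the cookie changed the view is to compare sizes.
print('without cookie:', len(r1.content), 'bytes; with cookie:', len(r2.content), 'bytes')

# Alternative sketch: requests can also take cookies as a dict via the
# cookies= parameter instead of a raw Cookie header. The parsing below
# assumes the copied header is a plain "name=value; name=value" string.
raw_cookie = headers2['Cookie']
cookie_dict = dict(pair.split('=', 1) for pair in raw_cookie.split('; '))
r3 = requests.get(url, headers=headers, cookies=cookie_dict)
print('cookies= variant:', r3.status_code)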
