# -*- coding: utf-8 -*-
# @Time : 2024/7/23 11:02
# @Author : 袁美玉
# @File : demo1_百度图片抓取.py
# @Software  ：PyCharm

'''
爬取百度上的雪山照片
参考点： 1.没有采取正则匹配的方式去找到对象，而是采用了json数据格式
        2.设置了分页查询，可以控制查询的页数，即查询多少个
'''
import os

import requests

# Request headers captured from a logged-in browser session.
# The Cookie carries the Baidu session; Referer/User-Agent make the
# acjson endpoint treat us as a normal image-search page load.
headers = {
    "Cookie": "BDIMGISLOGIN=0; winWH=%5E6_1872x966; BDqhfp=%E9%9B%AA%E5%B1%B1%26%26NaN-1undefined%26%263264%26%263; BIDUPSID=54EC44A93059F856AD40192B12CC9E37; PSTM=1720771497; BAIDUID=54EC44A93059F856D9A123344394C7EE:FG=1; BAIDUID_BFESS=54EC44A93059F856D9A123344394C7EE:FG=1; ZFY=o:AAaGy:AguPRjYq8FEQwXspbStdfLvsqlep7ZicO9XPs:C; newlogin=1; ZD_ENTRY=other; BDRCVFR[dG2JNJb_ajR]=mk3SLVN4HKm; BDRCVFR[-pGxjrCMryR]=mk3SLVN4HKm; H_WISE_SIDS=60359_60492_60502; BDRCVFR[tox4WRQ4-Km]=mk3SLVN4HKm; H_PS_PSSID=60359_60492_60502; BA_HECTOR=0k848g2k2g81a40g010ka0853g90de1j9uilv1v; PSINO=1; delPer=0; H_WISE_SIDS_BFESS=60359_60492_60502; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; userFrom=www.baidu.com; ab_sr=1.0.1_YTMwMjM5ODViZjFjMzU2ZGE5MGE0MjE4NTg4NzJhN2EwZjRmOTY0MDZlYjRiMGI3NmQ4ZGFiNTU5ZTE5ZWExNWVlODJjMWU5ODQxMjE0ZGU2ZDEwYmI4MWZhMzNlMzcxNDllNTA2NDgxMGY2NjM0ZmFjNWJlY2ViMDNiZGZkN2UxYWJhYTY1OTNmN2RjNzI3NzdiMzYxMjY3OWQyZTcyOQ==",
    "Host": "image.baidu.com",
    "Referer": "https://image.baidu.com/search/index?tn=baiduimage&ps=1&ct=201326592&lm=-1&cl=2&nc=1&ie=utf-8&dyTabStr=MCwxLDMsMiw0LDYsNSw3LDgsOQ%3D%3D&word=%E9%9B%AA%E5%B1%B1",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0",
}
# Download "snow mountain" (雪山) images from Baidu's paged acjson endpoint.
# Each page returns 30 JSON results; images are saved as img/1.jpg, img/2.jpg, ...
number = 1  # sequential counter used as the output filename
os.makedirs('img', exist_ok=True)  # original crashed if img/ did not exist
for page in range(1, 3):  # pages 1..2 -> pn=30, pn=60 (30 results per page)
    url = f'https://image.baidu.com/search/acjson?tn=resultjson_com&logid=11422453112783315836&ipn=rj&ct=201326592&is=&fp=result&fr=&word=%E9%9B%AA%E5%B1%B1&queryWord=%E9%9B%AA%E5%B1%B1&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=&z=&ic=&hd=&latest=&copyright=&s=&se=&tab=&width=&height=&face=&istype=&qc=&nc=1&expermode=&nojc=&isAsync=&pn={page*30}&rn=30&gsm=5a&1721715424312='
    # timeout keeps a stalled connection from hanging the script forever
    response = requests.get(url=url, headers=headers, timeout=10)
    json_data = response.json()
    data_list = json_data['data']
    # The last element of 'data' is an empty placeholder dict, so drop it.
    for data in data_list[:-1]:
        fromPageTitleEnc = data.get('fromPageTitleEnc', '')
        middleURL = data.get('middleURL')
        if not middleURL:
            # Some results come back without a downloadable URL; original
            # code raised KeyError here and aborted the whole run.
            continue
        print(fromPageTitleEnc, middleURL)
        try:
            img_data = requests.get(middleURL, timeout=10).content
        except requests.RequestException as exc:
            # Best-effort: one bad image should not stop the crawl.
            print(f'skip {middleURL}: {exc}')
            continue
        with open(f'img/{number}.jpg', mode='wb') as f:
            f.write(img_data)
        number += 1