from time import sleep
import requests

# 返回的几乎都是新浪微博的图片 而新浪设置了防盗链
headers = {
    'Referer': 'https://weibo.com/',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36',

}
proxies = {
    'http': 'http://47.119.164.33:8081',
}

for page in range(30, 30 * 10 + 1, 30):
    url_model = 'https://image.baidu.com/search/acjson?tn=resultjson_com&logid=10899260030925551680&ipn=rj&ct=201326592&is=&fp=result&fr=&word=%E8%94%A1%E5%BE%90%E5%9D%A4&queryWord=%E8%94%A1%E5%BE%90%E5%9D%A4&cl=2&lm=&ie=utf-8&oe=utf-8&adpicid=&st=-1&z=&ic=0&hd=&latest=&copyright=&s=&se=&tab=&width=&height=&face=0&istype=2&qc=&nc=1&expermode=&nojc=&isAsync=&pn={page}&rn=30&gsm=1e&1727860241161='.format(
        page=page)

    resp = requests.get(url_model, headers=headers, proxies=proxies)
    img_list = []
    for item in resp.json()['data']:
        if item.get('replaceUrl'):
            url = item.get('replaceUrl')[0]['ObjURL']
            img_list.append(url)

    path = 'D:/webDownload/spiderData/baidu_imgs/'  # 本地磁盘路径
    for url in img_list:
        try:
            img_resp = requests.get(url, headers=headers, proxies=proxies)
            if img_resp.status_code == 200:
                with open(path + url.split('/')[-1], 'wb') as f:
                    f.write(img_resp.content)
                    print(url.split('/')[-1] + "下载成功")

            else:
                print('请求错误')
        except Exception as e:
            print('fail')
