import os

import requests

# Scrape thumbnail image URLs ("hoverURL") from Baidu image search result
# pages and download each image into the local img/ directory.
#
# Fixes over the original:
#   - range(1, 1) was empty, so the loop never ran; PAGE_COUNT pages now run.
#   - the `pn` offset was hard-coded to 30 in the URL, so every page would
#     have fetched identical results; it is now computed per page.
#   - missing 'hoverURL' keys, URLs without '=', a missing img/ directory,
#     and hung requests (no timeout) no longer crash or stall the script.

PAGE_COUNT = 3  # number of result pages to fetch; Baidu serves 30 images per page

# Loop-invariant request headers: hoisted out of the page loop.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36'
}

# Ensure the output directory exists before any file writes.
os.makedirs('img', exist_ok=True)

for page in range(1, PAGE_COUNT + 1):
    # Baidu paginates via the `pn` query parameter (result offset, 30/page).
    pn = page * 30
    url = (
        'https://image.baidu.com/search/acjson?tn=resultjson_com&logid=8659352358105487496'
        '&ipn=rj&ct=201326592&is=&fp=result&fr=&word=%E7%BE%8E%E5%A5%B3&cg=girl'
        '&queryWord=%E7%BE%8E%E5%A5%B3&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=-1'
        '&z=&ic=0&hd=&latest=&copyright=&s=&se=&tab=&width=&height=&face=0&istype=2'
        '&qc=&nc=1&expermode=&nojc=&isAsync='
        f'&pn={pn}&rn=30&gsm=1e0000000000001e&1666878856429='
    )

    resp = requests.get(url, headers=headers, timeout=10)
    resp.raise_for_status()  # fail loudly on HTTP errors instead of parsing junk
    data_list = resp.json().get('data', [])

    # Collect thumbnail URLs; result entries can be empty dicts or lack
    # 'hoverURL' entirely, so guard both cases.
    image_urls = [
        item['hoverURL']
        for item in data_list
        if item and item.get('hoverURL')
    ]
    print(image_urls)

    for image_url in image_urls:
        content = requests.get(url=image_url, headers=headers, timeout=10).content
        # Derive a filename from the first query value; fall back to the last
        # path segment when the URL has no '=' (original code raised IndexError).
        parts = image_url.split('=')
        title = parts[1] if len(parts) > 1 else parts[0].rsplit('/', 1)[-1]
        with open(f'img/{title}.jpg', mode='wb') as f:
            f.write(content)
            print(f'{title}成功爬取')

    print(f'成功爬取完第{page}页')