import requests
import re
import json
if __name__ == "__main__":
    # Scrape Baidu image search ("acjson" AJAX endpoint) for a keyword and
    # print how many distinct image URLs were found on one result page.
    headers = {
        # Browser UA string: the endpoint rejects requests that look like bots.
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36 Edg/124.0.0.0'
    }
    jishu = 89  # starting sequence number for saved image filenames
    url = 'https://image.baidu.com/search/acjson'
    params = {
        'tn': 'resultjson_com',
        'logid': '8936465709811120571',
        'ipn': 'rj',
        'ct': '201326592',
        'is': '',
        'fp': 'result',
        'fr': '',
        'word': '神里绫华',
        'queryWord': '神里绫华',
        'cl': '2',
        'lm': '-1',
        'ie': 'utf-8',
        'oe': 'utf-8',
        'adpicid': '',
        'st': '-1',
        'z': '',
        'ic': '0',
        'hd': '',
        'latest': '',
        'copyright': '',
        's': '',
        'se': '',
        'tab': '',
        'width': '',
        'height': '',
        'face': '0',
        'istype': '2',
        'qc': '',
        'nc': '1',
        'expermode': '',
        'nojc': '',
        'isAsync': '',
        # Pagination: 'pn' is the result offset, 'rn' the page size (30/page).
        'pn': '120',
        'rn': '30',
        'gsm': '1e',
        '1715434782665': ''
    }
    # Pass params by keyword (positional order is fragile) and set a timeout
    # so a stalled connection cannot hang the script forever.
    response = requests.get(url, params=params, headers=headers, timeout=10)
    response.raise_for_status()  # fail loudly on HTTP errors instead of regex-parsing an error page
    st = response.text
    # Raw string: '\d' in a plain string literal is an invalid escape sequence
    # (SyntaxWarning on Python 3.12+, error in future versions).
    pattern = r'https://img\d\.baidu\.com/it/u=\d+,\d+&fm=\d+&fmt=auto&app=\d+&f=JPEG\?w=\d+&h=\d+'
    sbs = re.findall(pattern, st)
    # Deduplicate: the same image URL can appear multiple times in the payload.
    sllh_list1 = list(set(sbs))
    print(len(sllh_list1))
    # Download loop (disabled). If re-enabled, increment the counter before
    # opening the file so each image gets a unique name:
    # for img_url in sllh_list1:
    #     with open(f'.\爬虫学习\图片爬取\神里绫华{jishu}.png', 'wb') as fp:
    #         fp.write(requests.get(img_url, timeout=10).content)
    #     jishu += 1

    print('ok')