import os.path
import time

import requests
import json


# Workflow: capture the request -> build the params -> parse the JSON.
# Note 1: after the site redesign the URL is built entirely from params,
#         so nothing after the '?' needs to be copied from the browser.
# Note 2: the redesign also tightened anti-scraping — sleep between page
#         requests, and/or disable certificate verification for downloads.

cookie = 'accessId=578c8dc0-6fab-11e8-ab7a-fda8d0606763; qimo_seosource_578c8dc0-6fab-11e8-ab7a-fda8d0606763=%E7%BB%94%E6%AC%8F%E5%94%B4; qimo_seokeywords_578c8dc0-6fab-11e8-ab7a-fda8d0606763=; BIGipServerPools_Web_ssl=1531750592.47873.0000; Hm_lvt_c01558ab05fd344e898880e9fc1b65c4=1642408479,1642492113,1642493133; Hm_lpvt_c01558ab05fd344e898880e9fc1b65c4=1642493141; pageViewNum=10'
url = 'https://www.quanjing.com/Handler/SearchUrl.ashx?'

# Request headers copied from a real browser session.
headers = {
    'Cookie': cookie,
    'Host': 'www.quanjing.com',
    # Referer defeats the site's hotlink protection
    'Referer': 'https://www.quanjing.com/search/%E6%98%A5%E8%8A%82',
    'sec-ch-ua': '"Google Chrome";v="95", "Chromium";v="95", ";Not A Brand";v="99"',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36',
    'X-Requested-With': 'XMLHttpRequest'
}
# Keyword to search for on quanjing.com.
search = input('你爬啥：')

# Create the output directory once, up front, instead of re-checking its
# existence for every single image inside the download loop.
os.makedirs('resource', exist_ok=True)

# Each page holds at most 100 images — keep the page range modest.
for page in range(1, 2):
    params = {
        't': '3916',
        # JSONP callback name the server wraps around the JSON payload
        'callback': 'searchresult',
        # search keyword
        'q': search,
        'stype': '1',
        'pagesize': '100',
        # page number (pagination)
        'pagenum': page,
        'imageType': '2',
        'fr': '1',
        'sortFlag': '1',
        '_': '1642411013572'
    }
    # The endpoint returns JSONP: searchresult({...}). A timeout keeps the
    # script from hanging forever on a stalled connection.
    html = requests.get(url, params=params, headers=headers, timeout=10).text
    # Strip the JSONP wrapper by slicing between the outermost parentheses —
    # more robust than searching for fixed substrings of the payload.
    payload = html[html.find('(') + 1: html.rfind(')')]
    data = json.loads(payload)['imglist']
    # Download every image on this page, named by its site-assigned id.
    for item in data:
        pic_id = item['pic_id']
        pic_url = item['imgurl']
        print(pic_id, pic_url)
        with open(f'./resource/{pic_id}.jpg', 'wb') as fp:
            # verify=False skips TLS certificate verification (the original
            # author's workaround for the site's anti-scraping measures).
            fp.write(requests.get(pic_url, verify=False, timeout=10).content)
    # Be polite: pause between pages to avoid triggering rate limiting.
    time.sleep(1)