# Crawl street-snap ("街拍") image results from Baidu image search
import os
import time
import requests
import re
from urllib.parse import urlencode
from urllib.request import urlretrieve
from requests import RequestException
import random


def getPage(page):
    """Fetch one page of Baidu image-search JSON results for the query '街拍'.

    Args:
        page: 1-based page index; mapped to the API's ``pn`` offset
            (30 results per request, starting at 60).

    Returns:
        The raw response body as text on HTTP 200, otherwise None.
    """
    # Offset into the result list; the API pages in steps of 30.
    pn = int(60 + int(page * 30))
    params = {
        'tn': 'resultjson_com',
        'ipn': 'rj',
        'ct': '201326592',
        'is': '',
        'fp': 'result',
        'queryWord': '街拍',
        'cl': '2',
        'lm': '-1',
        'ie': 'utf-8',
        'oe': 'utf-8',
        'adpicid': '',
        'st': '-1',
        'z': '',
        'ic': '0',
        'word': '街拍',
        's': '',
        'se': '',
        'tab': '',
        'width': '',
        'height': '',
        'face': '0',
        'istype': '2',
        'qc': '',
        'nc': '1',
        'r': '',
        'pn': pn,
        'rn': '30',
        'gsm': '78',
        '1532570415610': '',
    }

    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Encoding': 'deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'Connection': 'keep-alive',
        'Cookie': 'BDqhfp=%E8%A1%97%E6%8B%8D%26%260-10-1undefined%26%260%26%261; BAIDUID=76FE93753AF9371FA6F3F5AD7C330D0A:FG=1; BIDUPSID=76FE93753AF9371FA6F3F5AD7C330D0A; PSTM=1522129829; pgv_pvi=4045239296; pgv_si=s5251164160; BDRCVFR[5IRyTarJWqT]=mbxnW11j9Dfmh7GuZR8mvqV; MCITY=-257%3A; PSINO=5; H_PS_PSSID=26523_1450_21096_26350_20719; BDRCVFR[X_XKQks0S63]=mk3SLVN4HKm; BDRCVFR[-pGxjrCMryR]=mk3SLVN4HKm; firstShowTip=1; cleanHistoryStatus=0; indexPageSugList=%5B%22%E8%A1%97%E6%8B%8D%22%2C%22python%22%2C%22%E4%B8%9D%E8%A2%9C%22%5D; userFrom=null',
        'Host': 'image.baidu.com',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',
        'Upgrade-Insecure-Requests': '1',
    }

    try:
        url = 'https://image.baidu.com/search/acjson?'
        # Timeout so a stalled connection cannot hang the crawl forever.
        res = requests.get(url, headers=headers, params=params, timeout=10)
        if res.status_code == 200:
            return res.text
        # Non-200 response: make the failure return explicit.
        return None
    except RequestException as e:
        # BUG FIX: requests.RequestException has no .reason/.code attributes
        # (those belong to urllib.error), so the original handler raised
        # AttributeError itself. Report the exception directly instead.
        print('request failed:', e)
        return None


def getImage(html):
    """Extract every thumbnail URL from a raw result-page body.

    Args:
        html: response text containing ``"thumbURL":"..."`` fragments.

    Returns:
        A list of the captured URL strings, in order of appearance.
    """
    thumb_pattern = re.compile(r'"thumbURL":"(.*?)"')
    return thumb_pattern.findall(html)


def saveImage(url):
    """Download one image URL into ./BaiduImage with a timestamp filename.

    Args:
        url: direct image URL (a thumbURL captured by getImage).
    """
    path = "./BaiduImage"
    # makedirs(exist_ok=True) is atomic w.r.t. concurrent creation, unlike
    # the check-then-mkdir of the original (race-prone).
    os.makedirs(path, exist_ok=True)

    # Nanosecond timestamp: str(time.time()) could collide when two
    # downloads complete within the float's printed precision.
    save_pic = os.path.join(path, str(time.time_ns()) + ".jpg")

    # urlretrieve streams the body straight to disk.
    urlretrieve(url, save_pic)
    print('保存图片：', save_pic)


def main(page):
    """Crawl one result page: fetch it, then save every image it lists.

    Args:
        page: 1-based page index forwarded to getPage.
    """
    html = getPage(page)
    if not html:
        # Fetch failed (network error or non-200): report and bail out.
        print('getPage()发生错误')
        return
    for image_url in getImage(html):
        saveImage(image_url)


if __name__ == "__main__":
    # Crawl the first 10 result pages, pausing between requests.
    banner = '=' * 10
    for page_no in range(1, 11):
        print(f"{banner} 爬取第 {page_no} /10 页图片 {banner}")
        main(page_no)
        # Random 2-5 s delay to avoid hammering the server.
        time.sleep(random.randint(2, 5))
