import os, time
import requests
import re
from urllib.parse import urlencode
from urllib.request import urlretrieve

def getPage(offset):
    """Fetch one page of Baidu image-search results for the query '街拍'.

    Args:
        offset: pagination offset (the 'pn' query parameter), normally a
            multiple of 30 since each page returns 30 results ('rn').

    Returns:
        The decoded JSON response as a dict on HTTP 200, or None on any
        network failure, non-200 status, or an unparseable body.
    """
    params = {
        'tn': 'resultjson_com',
        'ipn': 'rj',
        'ct': '201326592',
        'is': '',
        'fp': 'result',
        'queryWord': '街拍',
        'cl': '2',
        'lm': '-1',
        'ie': 'utf-8',
        'oe': 'utf-8',
        'adpicid': '',
        'st': '-1',
        'z': '',
        'ic': '0',
        'word': '街拍',
        's': '',
        'se': '',
        'tab': '',
        'width': '',
        'height': '',
        'face': '0',
        'istype': '2',
        'qc': '',
        'nc': '1',
        'fr': '',
        'expermode': '',
        'pn': offset,
        'rn': '30',
        'gsm': '1e',
        '1537253593794': '',
    }
    url = 'https://image.baidu.com/search/acjson?' + urlencode(params)
    try:
        # Timeout prevents a stalled connection from hanging the whole crawl.
        response = requests.get(url, timeout=10)
        response.encoding = "utf-8"
        if response.status_code == 200:
            return response.json()
        return None
    except (requests.RequestException, ValueError):
        # RequestException covers ConnectionError, Timeout, etc.;
        # ValueError covers a body that is not valid JSON.
        return None

def getImages(json):
    '''解析获取图片信息'''
    data = json.get('data')
    if data:
        for item in data:
            if item:
                yield {
                    'image': item.get('middleURL'),
                    'title': item.get('fromPageTitleEnc'),
                }


def replaceS(title):
    """Strip every character outside the CJK unified range from *title*.

    Used to turn a page title into a string safe to use as a folder name.
    """
    return re.sub(u'[^\u4E00-\u9FA5]', r'', title)

def saveImage(item):
    """Download one image into ./mypic_baidu/<sanitized title>/.

    Args:
        item: dict from getImages() with 'image' (URL or None) and
            'title' (string or None) keys.

    Entries with no image URL are skipped; a failed download is reported
    and skipped so one bad image does not abort the whole crawl.
    """
    local_image_url = item.get('image')
    if not local_image_url:
        # Some result entries have no 'middleURL'; nothing to download.
        return

    # Build the per-title directory; title may be missing entirely.
    title = replaceS(item.get('title') or '')
    path = os.path.join("./mypic_baidu/", title)
    # makedirs creates ./mypic_baidu too (os.mkdir would fail if the
    # parent is absent) and exist_ok avoids the exists/mkdir race.
    os.makedirs(path, exist_ok=True)

    # Name the local file after the last URL path segment.
    save_pic = os.path.join(path, local_image_url.split("/").pop())

    try:
        # urlretrieve streams the image straight to disk.
        urlretrieve(local_image_url, save_pic)
    except OSError as e:
        # urllib.error.URLError subclasses OSError; report and continue.
        print("下载失败:", local_image_url, e)

def main(offset):
    """Crawl one result page: fetch, parse, print and store each image.

    Args:
        offset: pagination offset forwarded to getPage().
    """
    json = getPage(offset)
    if json is None:
        # Network failure or non-200 response — skip this page.
        return
    for item in getImages(json):
        print(item)
        saveImage(item)

# Entry point: crawl the first 5 pages (150 images) when run as a script.
if __name__ == '__main__':
    # Offsets 0, 30, 60, 90, 120 — one page of 30 results each,
    # with a short pause between requests to be polite to the server.
    for offset in range(0, 150, 30):
        main(offset=offset)
        time.sleep(1)
    print("图片存储完毕，请查看mypic_baidu文件夹")
