import os,time
import requests
from urllib.parse import urlencode
from urllib.request import urlretrieve

def getPage(offset):
    """Fetch one page of Baidu image-search results as decoded JSON.

    Args:
        offset: pagination offset (the 'pn' query parameter), in
            multiples of 30 (the page size 'rn').

    Returns:
        The decoded JSON dict on HTTP 200 with a valid JSON body,
        otherwise None.
    """
    params = {
        'tn': 'resultjson_com',
        'ipn': 'rj',
        'ct': '201326592',
        'is': '',
        'fp': 'result',
        'queryWord': '街拍',
        'cl': '2',
        'lm': '-1',
        # Bug fix: these were 'utf - 8' (with embedded spaces), which is
        # not a valid encoding name for the ie/oe parameters.
        'ie': 'utf-8',
        'oe': 'utf-8',
        'word': '街拍',
        'nc': '1',
        'pn': offset,
        'rn': '30',
        'gsm': '1'
    }
    url = 'https://image.baidu.com/search/index?' + urlencode(params)

    try:
        # Timeout so a stalled connection cannot hang the crawler forever.
        response = requests.get(url, timeout=10)
        if response.status_code == 200:
            return response.json()
        return None  # explicit: non-200 responses yield no data
    except (requests.RequestException, ValueError):
        # RequestException covers ConnectionError, Timeout, etc.;
        # ValueError covers a 200 response whose body is not valid JSON.
        return None

def getImages(json):
    """Yield image-URL dicts parsed from one Baidu result page.

    Args:
        json: the decoded response dict from getPage(), or None if the
            fetch failed.

    Yields:
        {'image': <middleURL>} for each item that has a usable URL.
    """
    # getPage() returns None on failure; guard so callers can iterate the
    # result unconditionally without an AttributeError.
    if not json:
        return
    data = json.get('data')
    if data:
        for item in data:
            # Skip empty trailing items and items lacking a middleURL —
            # a None URL would crash the downloader.
            if item and item.get('middleURL'):
                yield {
                    'image': item.get('middleURL'),
                }


def saveImage(item):
    """Download one image into ./mypic/, named after the URL's last segment.

    Args:
        item: dict with an 'image' key holding the image URL; the value
            may be None or missing, in which case nothing is downloaded.
    """
    path = "./mypic"
    # makedirs(exist_ok=True) is race-free, unlike exists()+mkdir(), and
    # would also create missing parent directories.
    os.makedirs(path, exist_ok=True)
    local_image_url = item.get('image')
    if not local_image_url:
        return  # nothing to download
    # The file name is the last path segment of the URL; join properly
    # instead of string-concatenating (the original produced './mypic//…').
    save_pic = os.path.join(path, local_image_url.split("/")[-1])
    try:
        # urllib's urlretrieve stores the image directly to disk.
        urlretrieve(local_image_url, save_pic)
    except OSError:
        # Best-effort crawler: skip an image that fails to download
        # (URLError/HTTPError are OSError subclasses) rather than
        # aborting the whole run.
        pass



def main(offset):
    """Fetch one results page and save every image it references.

    Args:
        offset: pagination offset forwarded to getPage().
    """
    page = getPage(offset)
    # getPage() returns None on network failure or a non-200 status;
    # guard here so this function is safe independently of its callees.
    if page is None:
        return
    for item in getImages(page):
        saveImage(item)

# Entry-point guard: crawl five result pages (offsets 30..150), pausing
# between requests to avoid hammering the server.
if __name__ == '__main__':
    for offset in range(30, 180, 30):
        main(offset=offset)
        time.sleep(1)