# Crawl Baidu image search for "街拍" (street-snap) photos and download them.
import os,time,json,requests,re,sys,urllib
from urllib.parse import urlencode


def getPage(offset):
    """Fetch one page of Baidu image-search results for the keyword '街拍'.

    Parameters:
        offset: pagination offset (the `pn` query parameter), e.g. 0, 30, 60...

    Returns:
        The raw HTML text of the result page, or None when the request
        fails or the server does not answer with HTTP 200.
    """
    keyword = '街拍'
    word = urllib.parse.quote(keyword, safe='/')
    base_url = 'http://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&word='
    # gsm is the hexadecimal mirror of pn that Baidu expects alongside it.
    url = (base_url + word + "&pn=" + str(offset) + "&gsm=" + str(hex(offset))
           + "&ct=&ic=0&lm=-1&width=0&height=0")

    try:
        # Timeout keeps the crawler from hanging forever on a stalled server.
        res = requests.get(url, timeout=15)
        if res.status_code == 200:
            return res.text
        # Non-200 answer: return None explicitly instead of falling off the end.
        return None
    except requests.RequestException:
        print('没有数据')
        return None

def parsePage(html):
    """Extract image URLs from a Baidu image-search result page.

    Parameters:
        html: page source as returned by getPage(); may be None when the
              fetch failed.

    Returns:
        A list of objURL strings (original image addresses); empty when
        html is None or contains no matches.
    """
    if html is None:
        # getPage() returns None on failure; nothing to parse.
        return []
    # objURL holds the full-size image address inside Baidu's embedded JSON.
    pattern = re.compile(r'"objURL":"(.*?)",', re.S)
    return pattern.findall(str(html))

def savePage(item, j):
    """Download one image and save it as ./images/baidu/<j>.jpg.

    Parameters:
        item: image URL (an objURL extracted by parsePage()).
        j:    1-based sequence number used as the file name.
    """
    try:
        pic = requests.get(item, timeout=15)
        # Only persist real payloads: without this check a 404/403 error
        # page would be written to disk as a broken .jpg file.
        if pic.status_code != 200:
            print('下载第%s张图片时失败: %s' % (str(j), str(item)))
            return
        filename = str(j) + '.jpg'
        with open(os.path.join('./images/baidu', filename), 'wb') as f:
            f.write(pic.content)
        print('成功下载第%s张图片: %s' % (str(j), str(item)))
    except Exception as e:
        # Best-effort crawler: report the failure and keep going.
        print('下载第%s张图片时失败: %s' % (str(j), str(item)))
        print(e)

def main(offset):
    """Process one result page: fetch it, parse it, download every image.

    Parameters:
        offset: pagination offset forwarded to getPage().
    """
    # Make sure the output directory exists before any download starts.
    if not os.path.exists('./images/baidu'):
        os.makedirs('./images/baidu')
    page_html = getPage(offset)
    # enumerate(..., 1) supplies the 1-based file-name counter directly,
    # replacing the hand-incremented index of the original loop.
    for index, image_url in enumerate(parsePage(page_html), 1):
        savePage(image_url, index)

if __name__ == '__main__':
    # Crawl the result pages (30 images each); raise the range() bound
    # to fetch more than one page.
    for page in range(1):
        main(page * 30)
        print("第%s页完成。" % (page + 1))
        # Brief pause between pages to stay polite to the server.
        time.sleep(1)