# Scrape images from Baidu image search
import requests,time,re,os
from urllib.request import urlretrieve
from requests.exceptions import RequestException

def baidtu_uncomplie(url):
    """Decode an obfuscated Baidu image 'objURL' into a plain URL.

    Baidu replaces ':', '.', '/' with marker tokens and substitutes each
    remaining character through a fixed translation table; this reverses
    both steps. Returns *url* unchanged when it is None or already looks
    like a plain URL (contains 'http').
    """
    c = ['_z2C$q', '_z&e3B', 'AzdH3F']
    d = {'w':'a', 'k':'b', 'v':'c', '1':'d', 'j':'e', 'u':'f', '2':'g', 'i':'h', 't':'i', '3':'j', 'h':'k', 's':'l', '4':'m', 'g':'n', '5':'o', 'r':'p', 'q':'q', '6':'r', 'f':'s', 'p':'t', '7':'u', 'e':'v', 'o':'w', '8':'1', 'd':'2', 'n':'3', '9':'4', 'c':'5', 'm':'6', '0':'7', 'b':'8', 'l':'9', 'a':'0', '_z2C$q':':', '_z&e3B':'.', 'AzdH3F':'/'}
    if url is None or 'http' in url:
        return url
    decoded = url
    # First restore the multi-character markers (':' '.' '/').
    for marker in c:
        decoded = decoded.replace(marker, d[marker])
    # The single-character keys of `d` are exactly [a-w] and [0-9], which is
    # what the original per-char regex '^[a-w\\d]+$' matched — so a plain
    # dict lookup with passthrough is equivalent and avoids re entirely.
    return ''.join(d.get(ch, ch) for ch in decoded)


def getPage(offset):
    """Fetch one page of Baidu image-search JSON results.

    offset: zero-based page index; the API paginates 30 results per page
    via the 'pn' parameter.
    Returns the decoded JSON dict on HTTP 200, or None on any failure
    (non-200 status or a requests-level error such as a timeout).
    """
    url = 'https://image.baidu.com/search/acjson'
    params = {
        'tn':'resultjson_com',
        'ipn':'rj',
        'ct':'201326592',
        'is':'',
        'fp':'result',
        'queryWord':'街拍',
        'cl':'2',
        'lm':'-1',
        'ie':'utf-8',
        'oe':'utf-8',
        'adpicid':'',
        'st':'-1',
        'z':'',
        'ic':'0',
        'hd':'',
        'latest':'',
        'copyright':'',
        'word':'街拍',
        's':'',
        'se':'',
        'tab':'',
        'width':'',
        'height':'',
        'face':'0',
        'istype':'2',
        'qc':'',
        'nc':'1',
        'fr':'',
        'expermode':'',
        'force':'',
        'pn':offset*30,
        'rn':'30',
        'gsm':'1e',
        '1546344495643':'',
    }
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36',
    }

    try:
        # timeout so a stalled connection cannot hang the crawl forever;
        # requests.exceptions.Timeout is a RequestException subclass and is
        # handled below like any other request failure.
        res = requests.get(url, params=params, headers=headers, timeout=10)
        if res.status_code == 200:
            return res.json()
        else:
            return None
    except RequestException:
        # BUG FIX: the original did `raise None`, which itself raises
        # TypeError (None is not an exception). Failure is signalled by
        # returning None, consistent with the non-200 branch above.
        return None


def parsePage(content):
    """Yield {'title', 'image'} dicts for every entry on one result page.

    content: the JSON dict returned by getPage(); its 'data' list holds
    one record per image, with the image URL obfuscated in 'objURL'.
    """
    records = content.get('data')
    return (
        {
            'title': record.get('fromPageTitleEnc'),  # page title (encoded)
            'image': baidtu_uncomplie(record.get('objURL')),
        }
        for record in records
    )

def saveImage(item, count):
    """Download one image to ./baidupic/<count>.jpg.

    item: dict with an 'image' key holding the decoded URL (may be None —
    Baidu pages often end with an invalid record, so download errors are
    printed and swallowed rather than aborting the whole crawl).
    count: running file number used as the file name.
    """
    path = './baidupic'
    # exist_ok avoids the check-then-create race of exists() + mkdir(),
    # and is a no-op when the directory is already there.
    os.makedirs(path, exist_ok=True)

    try:
        # os.path.join instead of path+'/'+name (the old code produced
        # './baidupic//N.jpg').
        urlretrieve(item.get('image'), os.path.join(path, '%d.jpg' % count))
    except Exception as e:
        # Best-effort: report and continue with the next image.
        print(e)

    print('%d.jpg' % count)


def main(offset, count):
    """Crawl one result page and save its images.

    offset: page index passed through to getPage().
    count:  first file number to use for this page.
    Returns the next unused file number so callers can chain pages.
    """
    page = getPage(offset)
    if not page:
        # Fetch failed — leave the counter untouched.
        return count
    for record in parsePage(page):
        saveImage(record, count)
        count += 1
    return count

if __name__=="__main__":
    count = 1
    for i in range(1,5):
        count = main(i,count)     
        time.sleep(1)




















