
# coding: utf-8

# In[1]:


import os
# Work from the study folder so relative paths (e.g. ./mypic below) resolve
# there.  Raw string fixes the invalid "\p" escape in the original literal,
# which raises a SyntaxWarning on modern Python and is slated to become an
# error.
os.chdir(r"E:\pythonstudy")


# In[2]:


import os,re,time
import requests
import urllib
from urllib.parse import urlencode
from urllib.request import urlretrieve
import json

def getPage(offset):
    """Fetch one page of Baidu image-search results for '街拍'.

    Args:
        offset: pagination offset (the ``pn`` query parameter), stepped in
            multiples of 30 (one result page = 30 entries).

    Returns:
        The decoded JSON response as a dict on success, or ``None`` on a
        network failure, a non-200 status, or a malformed response body.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
    }

    params = {
        'tn':'resultjson_com',
        'ipn':'rj',
        'ct':201326592,
        'is':'',
        'fp':'result',
        'queryWord':'街拍',
        'cl':2,
        'lm':-1,
        'ie':'utf-8',
        'oe':'utf-8',
        'adpicid':'',
        'st':-1,
        'z':'',
        'ic':0,
        'word':'街拍',
        'face':0,
        'istype':2,
        'qc':'',
        'nc':1,
        'pn':offset,
        'rn':30,
        'gsm':'1e',
        '1531790663290':''
    }

    url = 'http://image.baidu.com/search/acjson?' + urlencode(params)
    try:
        # timeout added so a stalled connection cannot hang the crawler forever
        response = requests.get(url, headers=headers, timeout=10)
        if response.status_code == 200:
            # Baidu occasionally emits bytes that are not valid UTF-8; drop them
            str_json = response.content.decode('utf-8', 'ignore')
            return json.loads(str_json)
    except requests.RequestException:
        # Covers ConnectionError (as before) plus timeouts and other
        # transport-level failures.
        return None
    except json.JSONDecodeError:
        # Response body was not valid JSON despite the 200 status.
        return None
    # Explicit None for non-200 responses (was the implicit fall-through).
    return None

def getImages(json):
    """Yield ``{'image': url, 'title': cleaned_title}`` dicts from a result page.

    Args:
        json: the parsed result dict from ``getPage`` (may be ``None`` when
            the fetch failed).  The name shadows the stdlib ``json`` module
            (unused here); kept for compatibility with existing callers.

    Yields:
        One dict per usable entry; entries that are falsy or have no
        ``middleURL`` are skipped so downstream download code never sees
        a missing URL.
    """
    # Tolerate a failed fetch instead of raising AttributeError on None.
    if not json:
        return
    data = json.get('data')
    # Strips any HTML tags embedded in the page title.
    dr = re.compile(r'<[^>]+>', re.S)
    if data:
        for item in data:
            if item:
                imageURL = item.get('middleURL')
                if not imageURL:
                    # No downloadable URL — skip rather than yield a dud.
                    continue
                # fromPageTitle can be absent/None; re.sub would raise on it.
                fromPageTitle = item.get('fromPageTitle') or ''
                title = dr.sub('', fromPageTitle)
                yield {
                    'image': imageURL,
                    'title': title
                }

def saveImage(item):
    """Download one image described by *item* and store it under ./mypic.

    Args:
        item: dict with keys ``'image'`` (source URL) and ``'title'``
            (page title used to build the file name).

    Raises:
        urllib.error.URLError: if the download fails (propagated to caller).
        OSError: if the file cannot be written.
    """
    # Ensure the target directory exists; makedirs(exist_ok=True) is
    # race-free, unlike the exists()+mkdir() check it replaces.
    pathTop = "./mypic"
    os.makedirs(pathTop, exist_ok=True)

    # Build source URL and destination path.
    local_image_url = item.get('image')
    # Replace characters that are illegal in Windows file names.
    rstr = r"[\/\\\:\*\?\"\<\>\|]"
    new_title = re.sub(rstr, "_", item.get('title'))
    # Timestamp suffix keeps names unique when titles repeat across pages.
    timeStr = str(time.time())
    save_pic = pathTop + "/" + new_title + '_' + timeStr + ".jpg"
    print(save_pic)

    # Browser-like headers so the CDN serves the image.
    # BUG FIX: the original forced 'Host: img0.imgtn.bdimg.com' on every
    # request, which is wrong for images hosted on any other domain; with
    # the header removed urllib derives the correct Host from the URL.
    # NOTE(review): advertising gzip without decompressing the body could
    # corrupt the saved file if a server ever compressed it — confirm the
    # CDN never gzips JPEGs, or drop Accept-Encoding too.
    headers = [
        ('Connection', 'keep-alive'),
        ('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'),
        ('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'),
        ('Accept-Encoding','gzip,deflate,sdch'),
        ('Accept-Language', 'zh-CN,zh;q=0.8'),
        ('Referer','http://image.baidu.com/i?tn=baiduimage&ps=1&ct=201326592&lm=-1&cl=2&nc=1&word=%E4%BA%A4%E9%80%9A&ie=utf-8'),
    ]
    opener = urllib.request.build_opener()
    opener.addheaders = headers
    # Context managers guarantee both the HTTP response and the output file
    # are closed even when the read or write raises.
    with opener.open(local_image_url) as resp, open(save_pic, 'wb') as f:
        f.write(resp.read())

def main(offset):
    """Crawl one result page at *offset*: fetch, parse, and save each image.

    Args:
        offset: pagination offset forwarded to ``getPage`` (multiples of 30).
    """
    # Renamed from 'json' to avoid shadowing the stdlib module.
    page = getPage(offset)
    if page is None:
        # Network failure or bad response — skip this page rather than
        # crash with AttributeError inside getImages.
        return
    for item in getImages(page):
        print(item)
        saveImage(item)


if __name__ == '__main__':
    # Crawl the first five result pages (30 entries each: offsets
    # 0, 30, ..., 120), pausing briefly between requests to stay polite
    # to the server.
    for page_offset in range(0, 150, 30):
        main(offset=page_offset)
        time.sleep(1)

