import random
import re
import time
import urllib
import urllib.parse

import requests


def get_url(keyword, page):
    """Build a Baidu image-search ("flip" endpoint) URL for *keyword*.

    Parameters
    ----------
    keyword : str
        Search term; percent-encoded (UTF-8) before insertion into the query.
    page : int
        Result offset, sent as the ``pn`` parameter and (hex-encoded) as ``gsm``.

    Returns
    -------
    str
        The fully assembled search URL.
    """
    # Use urllib.parse explicitly: a bare ``import urllib`` does not load the
    # ``parse`` submodule; this previously worked only because ``requests``
    # happens to import urllib.parse as a side effect.
    quoted = urllib.parse.quote(keyword, safe='/')
    prefix = "http://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&word="
    # hex() already returns a str, so the old str(hex(page)) wrapper was redundant.
    return (prefix + quoted + "&pn=" + str(page) + "&gsm=" + hex(page)
            + "&ct=&ic=0&lm=-1&width=0&height=0")

def get_image_urls(url):
    """Fetch *url* and extract candidate image URLs from the page HTML.

    Returns the list of ``objURL`` values embedded in Baidu's page source.
    On any request failure the error is printed and an empty list is
    returned, so the surrounding crawl can continue (best-effort behavior
    of the original is preserved).
    """
    # Random pause to avoid hammering the server / tripping rate limits.
    time.sleep(random.uniform(0, 1.5))
    try:
        # A timeout keeps one stalled connection from hanging the whole
        # crawl forever; 15s matches the timeout used in down_image.
        html = requests.get(url, timeout=15).text
    except Exception as e:  # best-effort: log and skip this page
        print(e)
        return []
    return re.findall('"objURL":"(.*?)",', html, re.S)


def down_image(image_urls, path):
    """Download every URL in *image_urls* to *path* as ``1.jpg``, ``2.jpg``, ...

    *path* is used as a plain string prefix, so it should end with a path
    separator (the caller passes ``"./crawl/"``). Each failure is printed
    and skipped, so one bad URL does not abort the whole batch.
    """
    for i, image_url in enumerate(image_urls):
        # Polite random delay between downloads.
        time.sleep(random.uniform(0, 1.5))
        try:
            image = requests.get(image_url, timeout=15)
            # Don't save HTTP error pages (404 HTML etc.) as .jpg files;
            # raise_for_status routes them to the failure branch below.
            image.raise_for_status()
            filename = path + str(i + 1) + '.jpg'
            with open(filename, 'wb') as f:
                f.write(image.content)
            print('成功下载第%s张图片: %s' % (str(i + 1), str(image_url)))
        except Exception as e:  # best-effort: report and move on
            print('下载第%s张图片时失败: %s' % (str(i + 1), str(image_url)))
            print(e)


if __name__ == '__main__':
    import os

    keyword = "xxx"        # search term to crawl
    path = r"./crawl/"     # output directory; must end with a slash (used as prefix)
    pageSize = 30          # result offset step per request
    pageTotal = 100        # last page index fetched (pages 0..pageTotal inclusive)

    # Create the output directory up front; previously the first
    # open(path + ..., 'wb') crashed when ./crawl/ did not exist.
    os.makedirs(path, exist_ok=True)

    imageUrlLists = []
    # The original while-True/break loop over pages 0..pageTotal, as a for loop.
    for pageBegin in range(pageTotal + 1):
        # BUG FIX: ``.format([pageBegin])`` formatted a one-element list and
        # printed "[0]", "[1]", ...; format the integer itself.
        print("第{}次请求数据".format(pageBegin))
        url = get_url(keyword, pageBegin * pageSize)
        imageUrlLists.extend(get_image_urls(url))

    # De-duplicate before downloading (set() drops order, as in the original).
    down_image(list(set(imageUrlLists)), path)