# -*- coding: utf-8 -*-
import requests
import re
import time
import random
import traceback
import os


def Getpn(num):
    """Map a requested image count to the maximum Baidu page offset.

    Each full batch of 60 requested images advances the flip-search
    page offset by 40; fewer than 61 images need no offset at all.
    The closed form below reproduces the original subtract-and-step
    loop exactly (including non-positive inputs, which yield 0).
    """
    return max(0, (num - 1) // 60 * 40)


keyword = str(input("请输入关键字："))
num = int(input("图片数量："))
pn = Getpn(num)

# BUG FIX: the original called time.mtie(), which does not exist.
start = time.time()
i = 0  # page offset, starting from the first page
url_string = ""
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.86 Safari/537.36'
}
while i <= pn:
    try:
        # BUG FIX: the page offset parameter must be str(i), not str(pn) —
        # the original requested the same page on every iteration.
        url = ("https://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&word="
               + keyword + "&pn=" + str(i)
               + "&gsm=3c&ct=&ic=0&lm=-1&width=0&height=0")
        # timeout prevents an unresponsive server from hanging the script
        r = requests.get(url, headers=headers, timeout=10).text
        url_string += r
        time.sleep(random.uniform(0.8, 1.2))  # polite random delay between pages
    except requests.RequestException:
        print("获取页面失败")
    # BUG FIX: advance the offset even on failure — the original only
    # incremented inside the try block, so a persistent error looped forever.
    i += 40

# Non-greedy match extracts each objURL value exactly.
picurl_list = re.findall('"objURL":"(.*?)"', url_string)
picurls = []
for picurl in picurl_list:
    onepicurl = re.findall('http://.+g', picurl)
    # BUG FIX: guard against an empty match — the original indexed [0]
    # unconditionally and raised IndexError on non-matching URLs.
    if onepicurl:
        picurls.append(onepicurl[0])

# Idiomatic, race-free directory creation (replaces the == False check).
os.makedirs("baibu_Pic", exist_ok=True)

i = 0
for url in picurls:
    i += 1
    name = keyword + str(i)
    try:
        # timeout added so one dead image link cannot stall the run
        r = requests.get(url, headers=headers, timeout=10)
        path = "baibu_Pic/" + name + ".jpg"
        with open(path, 'wb') as f:
            f.write(r.content)
        print(name + "爬取成功")
    except requests.RequestException:
        print("爬取图片失败")
    if i == num:  # stop once the requested number of images is saved
        break

finish = time.time()
t = finish - start
print("共耗时：%fs" % t)
