#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2021/1/23 20:02
# @Author  : Cxk
import requests,os #首先导入库
import  re,time,random

MaxSearchPage = 20 # maximum number of search-result pages to crawl per keyword
CurrentPage = 0 # index of the result page currently being crawled (mutated by spidler)
count_img=0 # running count of images downloaded for the current keyword (reset per keyword in __main__)

# NOTE(review): leftover disabled face-recognition hook — kept for reference.
# face_recognitio = FaceRecognition("config.yml")
# Regex helpers below extract the image URLs and the next-page link from a result page.
def imageFiler(content):
    """Return all image URLs (the "objURL" values) found in a Baidu result page."""
    obj_url_pattern = re.compile('"objURL":"(.*?)"', re.S)
    return obj_url_pattern.findall(content)
def nextSource(content): # 通过正则获取下一页的网址
          nexts = re.findall('<div id="page">.*<a href="(.*?)" class="n">',content,re.S)[0] 
          return nexts
#爬虫主体
def spidler(source, all_img):
    """Crawl one Baidu image-search result page, then recurse into the next page.

    source  : absolute URL of the result page to fetch.
    all_img : total number of images wanted for the current keyword.

    Side effects: writes .jpg files under the global ``folder_path`` and
    mutates the global counters ``count_img`` and ``CurrentPage``.

    Fixes over the original:
    - the bare ``except:`` is narrowed to download/save errors only, so
      programming errors are no longer silently swallowed;
    - ``nextSource(content)`` is computed once instead of twice.
    """
    global folder_path, count_img, MaxSearchPage, CurrentPage
    session = requests.session()
    session.headers['User-Agent'] = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
    content = session.get(source).content.decode('utf-8')
    image_urls = imageFiler(content)  # all image URLs on this result page
    for image_url in image_urls:
        try:
            picture = requests.get(image_url, timeout=5)
            # Random suffix reduces filename collisions across keywords/pages.
            picture_save_path = folder_path + str(count_img) + str(random.randint(0, 2754)) + '.jpg'
            with open(picture_save_path, 'wb') as f:
                f.write(picture.content)
        except (requests.RequestException, OSError):
            # Best effort: skip any image that fails to download or save.
            continue
        count_img += 1
        time.sleep(0.3)  # throttle requests to be polite to the server
        print('**********************%s.jpg完成****************' % count_img)
        if count_img >= all_img:
            break
    # Keep paginating until the page budget or the image quota is reached.
    if CurrentPage <= MaxSearchPage and count_img < all_img:
        next_page = nextSource(content)  # hoisted: original ran the regex twice
        if next_page:
            CurrentPage += 1
            # Next-page links are relative; prefix the site root and recurse.
            spidler("http://image.baidu.com" + next_page, all_img)
            
#爬虫的开启方法
def beginSearch(key, all_img):
    """Kick off a crawl of Baidu image search for one keyword.

    key     : search keyword (any str()-convertible value).
    all_img : number of images to download for this keyword.

    Side effects: creates the ./img/ directory, sets the global
    ``folder_path``, and drives ``spidler`` which downloads the files.
    """
    global folder_path
    # Directory where all downloaded images are saved.
    folder_path = './img/'
    # exist_ok=True replaces the original racy exists()-then-makedirs check.
    os.makedirs(folder_path, exist_ok=True)
    # "flip" search endpoint: only the word= query value changes per keyword.
    StartSource = "http://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&word=" + str(key) + "&ct=201326592&v=flip"
    spidler(StartSource, all_img)
    print('**********************全部完成****************')


if __name__ == "__main__":    
    all_img=150 #每个关键字想要爬取的图片数
    #关键字
    a=["狗",'老鼠','恐龙','马','孔雀']
    for line in a:
        count_img=0
        beginSearch(line,all_img)
        
