import urllib.request
import re
import multiprocessing
import os
import requests
from bs4 import BeautifulSoup
from urllib import parse
from urllib.parse import unquote
from urllib.parse import quote


# Browser-like request headers (desktop Chrome User-Agent).
# NOTE(review): collected here but whether they are actually sent with the
# download requests is left to the code below — confirm the site accepts
# requests without them before removing.
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}
# Session cookies captured from a browser session.
# NOTE(review): never referenced anywhere in this file — presumably kept for a
# site that requires a logged-in/human-verified session; verify before deleting.
cookies = {'cookies': '_ga=GA1.2.96760470.1574309668; is_human=1; __cfduid=d8606f012fcf1db15884c86979cda5eae1583326869; lang=zh; anonymous_user_id=dcc25bf2-1902-4811-b754-92fd0b722153; _gid=GA1.2.235242853.1583326869; _sp_id.aded=3972e025-4b78-4631-837a-440970670236.1574309670.5.1583326897.1578040715.f3cb55de-9d9c-43e3-b38b-ed7dcefae866; client_width=897'}

def is_file_exist(word):
    """Ensure the download directory for *word* exists, creating it if needed.

    Note: despite its name, this creates a *directory*, not a file, and
    returns nothing.  The base path is hard-coded for a Windows drive.
    """
    path = 'E:/picture/pixabay/' + word
    # exist_ok=True avoids the check-then-create race of the original
    # `if not os.path.exists(path): os.makedirs(path)` pair.
    os.makedirs(path, exist_ok=True)

def getimg(keyword,page):
    """Download one results page of images for *keyword* from pexels.com.

    Files are saved to E:/picture/pixabay/<keyword>/<page>/<index>.jpg.
    Images that already exist on disk are skipped; download failures for
    individual images are skipped (best effort), not raised.
    """
    page = str(page)
    keyword = unquote(keyword)
    # Per-page storage directory.
    path = 'E:/picture/pixabay/' + keyword + '/%s' % page
    os.makedirs(path, exist_ok=True)

    # Search-results page for this keyword/page.
    url = 'https://www.pexels.com/search/' + keyword + '/?page=' + page

    # Send the browser-like headers so the site does not reject the
    # default python-requests User-Agent.
    r = requests.get(url, headers=headers)
    # Parse the HTML source into a navigable tree.
    soup = BeautifulSoup(r.content, "html.parser")

    # Full-size image URLs live in the "data-big-src" attribute of <img>
    # tags.  find_all('img') also matches decorative images without that
    # attribute, so use .get() and skip them instead of raising KeyError
    # (the original `attrs["data-big-src"]` crashed on the first such tag).
    for i, tag in enumerate(soup.find_all('img')):
        imgurl = tag.attrs.get("data-big-src")
        if not imgurl:
            continue

        # URL layout: .../photos/<number>/<name>?<query>
        slash = imgurl.rfind("/")
        prev_slash = imgurl.rfind("/", 0, slash)
        number = imgurl[prev_slash + 1:slash]   # photo id
        query = imgurl.rfind("?")
        name = imgurl[slash + 1:query]          # file name

        # Rebuild a direct-download link for the full-resolution JPEG.
        imgurl = ("https://images.pexels.com/photos/" + number + "/" + name
                  + "?cs=srgb&dl=geometric-decoration-" + number + ".jpg&fm=jpg")

        print("正在下载：%s" % imgurl)

        dest = path + '/%s.jpg' % i
        # Check before downloading — the original fetched the image first
        # and only then discovered it was already on disk.
        if os.path.exists(dest):
            print("图片已存在")
            continue

        try:
            # 0.5s was far too short for a full-size image; allow a
            # realistic timeout and skip images that still fail.
            r = requests.get(imgurl, timeout=10)
        except requests.RequestException:
            continue  # best effort: move on to the next image

        try:
            with open(dest, 'wb') as f:
                f.write(r.content)
        except OSError:
            print("something wrong")


if __name__ == '__main__':
    # Keyword to search for.
    keyword = input('请输入关键字：')
    # Number of result pages to crawl.
    pagenumber = int(input('请输入爬取的页数：'))
    # Make sure the top-level keyword directory exists.
    is_file_exist(keyword)
    # Bug fix: the original looped range(4, pagenumber+1), silently
    # skipping pages 1-3 (leftover from debugging a specific page).
    for page in range(1, pagenumber + 1):
        getimg(keyword, page)