#coding:utf-8
import requests
import re
import os
import time
import random


def get_text(url):
    """Fetch *url* and return the response body as text.

    Uses the module-level ``headers`` dict defined in the main block.
    Returns '' on any request failure so callers can always treat the
    result as a (possibly empty) string.
    """
    try:
        # timeout keeps a dead connection from hanging the whole crawl
        response = requests.get(url, headers=headers, timeout=10)
        response.raise_for_status()
        return response.text
    except requests.RequestException as err:
        # Narrow catch: network/HTTP errors only. The original caught
        # BaseException, which also swallowed KeyboardInterrupt.
        print(err)
        return ''


def get_img_download(url, exclude):
    """Download every photo linked from the search-result page *url*.

    Image URLs already present in *exclude* are skipped; newly
    downloaded URLs are appended to *exclude* (mutated in place) so
    repeated pages do not re-download the same image. Files are saved
    in the current working directory as '<photo-slug>.jpeg'.
    """
    html = get_text(url)
    if not html:  # get_text returns '' on failure, never None
        return
    sub_urls = re.findall(r'/photo/[\w+-]+-\w+-\d+/', html, re.S)  # photo page paths
    for sub_url in sub_urls:
        time.sleep(random.uniform(1.0, 3.0))  # be gentle to the server
        img_name = sub_url.split('/')[-2]
        sub_url = 'https://www.pexels.com' + sub_url
        content = get_text(sub_url)
        matches = re.findall(r'data-zoom-src=\'(.*?)\'', content)
        if not matches:
            # Page fetch failed or the markup changed — skip instead of
            # raising IndexError on [0] as the original did.
            continue
        img_url = matches[0]
        if img_url not in exclude:  # skip images already downloaded
            exclude.append(img_url)
            with open('{}.jpeg'.format(img_name), 'wb') as file:
                # Send the same headers/timeout as every other request.
                file.write(requests.get(img_url, headers=headers, timeout=10).content)


if __name__ == '__main__':
    url_template = 'https://www.pexels.com/search/{}/?page={}'
    key_word = input('please enter what do you want: ')

    headers = {"user-agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36"}
    # Pages 1-10. The original `range(1, 10)` combined with `i + 1`
    # produced pages 2-10 and silently skipped page 1.
    urls = [url_template.format(key_word, page) for page in range(1, 11)]
    exclude = []  # image URLs already downloaded, shared across pages
    # os.path.join keeps the path portable; the original '\\' concatenation
    # only worked on Windows.
    file_path = os.path.join(os.getcwd(), key_word)
    # Create the target directory if missing — the original had os.mkdir
    # commented out, so os.chdir crashed on a fresh run.
    os.makedirs(file_path, exist_ok=True)
    os.chdir(file_path)
    for url in urls:
        get_img_download(url, exclude)
