# -*- coding: utf-8 -*-
"""
Created on Sat May 23 11:36:55 2020

@author: Matrix
"""
import urllib.request
import re
import multiprocessing
import os
import json
import time
import requests
import random
from bs4 import BeautifulSoup
from urllib import parse
from urllib.parse import unquote
from urllib.parse import quote
import socket



# Browser-like User-Agent so the site serves normal responses instead of
# blocking the default python-requests/urllib agent.
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}
# Cookie data captured from a browser session.
# NOTE(review): the entire cookie string is stored under the single key
# 'cookies'; requests' `cookies=` expects one dict entry per cookie name.
# Also, none of headers/cookies/proxies are referenced by the code shown
# below -- verify whether they were meant to be passed to requests.get.
cookies = {'cookies': '_ga=GA1.2.96760470.1574309668; is_human=1; __cfduid=d8606f012fcf1db15884c86979cda5eae1583326869; lang=zh; anonymous_user_id=dcc25bf2-1902-4811-b754-92fd0b722153; _gid=GA1.2.235242853.1583326869; _sp_id.aded=3972e025-4b78-4631-837a-440970670236.1574309670.5.1583326897.1578040715.f3cb55de-9d9c-43e3-b38b-ed7dcefae866; client_width=897'}
# Hard-coded HTTP/HTTPS proxy endpoints (likely stale; unused in this file).
proxies={'http': 'http://60.188.16.15:3000', 'https': 'https://60.188.16.15:3000'}


def is_file_exist(word, base='E:/picture/unsplash/'):
    """Ensure the download directory for *word* exists, creating it if needed.

    Args:
        word: search keyword, used as the sub-directory name.
        base: root directory for downloads. Defaults to the original
              hard-coded location so existing callers are unchanged.
    """
    # exist_ok=True avoids the check-then-create race of the original
    # `if not os.path.exists(...)` pattern and is idempotent.
    os.makedirs(base + word, exist_ok=True)


def download_images(keyword,page):
    """Download one page of Unsplash photo results for *keyword*.

    Fetches the JSON photo listing for the given page, then downloads
    each image's 'raw' URL into E:/picture/unsplash/<keyword>/<page>/.

    Args:
        keyword: search keyword (may be percent-encoded; it is unquoted).
        page: 1-based result page number (int or str).
    """
    page = str(page)
    keyword = unquote(keyword)
    # BUGFIX: original wrote to 'E:/picture/unplash/' (typo) while
    # is_file_exist creates 'E:/picture/unsplash/' -- unified on the
    # correctly spelled root so all files land in one tree.
    path = 'E:/picture/unsplash/' + keyword+'/%s'%page +'/'
    # Create the per-page storage directory.
    if not os.path.exists(path):
        os.makedirs(path)

    # NOTE(review): the keyword is passed as 'order_by'; the napi search
    # endpoint normally takes a 'query=' parameter -- confirm this URL
    # returns the intended results.
    url='https://unsplash.com/napi/photos?page='+page+'&per_page=12&order_by='+keyword

    # timeout added so a stalled server cannot hang the scraper forever.
    data = json.loads(requests.get(url, timeout=30).text)

    # Single pass replaces the original pair of parallel lists; the
    # printed output and download order are identical.
    for index, item in enumerate(data):
        image_url = item['urls']['raw']
        print("正在下载：%s" %image_url)
        auto_down(image_url, path + str(item['id'])+'.jpg')
        print('第' + str(index) + '个图片下载完成')


def auto_down(url,filename):
    """Download *url* to *filename*, retrying up to 5 extra times on timeout.

    Makes one initial attempt plus at most five retries (six attempts
    total), matching the original behaviour.  Exceptions other than
    socket.timeout (e.g. urllib.error.URLError) still propagate.

    Args:
        url: source URL (any scheme urllib.request.urlretrieve accepts).
        filename: destination file path.
    """
    # NOTE: mutates the process-wide default socket timeout as a side
    # effect, exactly as the original did.
    socket.setdefaulttimeout(30)
    try:
        urllib.request.urlretrieve(url, filename)
        return
    except socket.timeout:
        pass
    # Retries 1..5; message text is preserved from the original.
    for count in range(1, 6):
        try:
            urllib.request.urlretrieve(url, filename)
            return
        except socket.timeout:
            err_info = 'Reloading for %d time' % count if count == 1 else 'Reloading for %d times' % count
            print(err_info)
    print("downloading picture failed!")



if __name__ == '__main__':
    # Ask for the search keyword and how many result pages to crawl.
    keyword = input('请输入关键字：')
    pagenumber = int(input('请输入爬取的页数：'))
    # Make sure the destination folder for this keyword exists.
    is_file_exist(keyword)
    # Result pages are 1-indexed on the site.
    for page in range(1, pagenumber + 1):
        download_images(keyword, page)
