# -*- coding: utf-8 -*-
"""
Created on Sat May 23 11:36:55 2020

@author: Matrix
"""
import urllib.request
import re
import multiprocessing
import os
import time
import requests
import random
from bs4 import BeautifulSoup
from urllib import parse
from urllib.parse import unquote
from urllib.parse import quote
import socket

# Global default timeout for plain socket / urllib operations.
# NOTE(review): 0.5 s is very aggressive, and it does NOT apply to `requests`
# calls (requests manages its own timeouts) — confirm this is intentional.
socket.setdefaulttimeout(0.5)

# Browser-like User-Agent so the site does not reject the scraper outright.
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}
# NOTE(review): `cookies` and `proxies` are defined but never passed to any
# request in this file — kept for potential use, verify they are still needed.
cookies = {'cookies': '_ga=GA1.2.96760470.1574309668; is_human=1; __cfduid=d8606f012fcf1db15884c86979cda5eae1583326869; lang=zh; anonymous_user_id=dcc25bf2-1902-4811-b754-92fd0b722153; _gid=GA1.2.235242853.1583326869; _sp_id.aded=3972e025-4b78-4631-837a-440970670236.1574309670.5.1583326897.1578040715.f3cb55de-9d9c-43e3-b38b-ed7dcefae866; client_width=897'}
proxies={'http': 'http://60.188.16.15:3000', 'https': 'https://60.188.16.15:3000'}

def is_file_exist(word):
    """Ensure the download directory for *word* exists.

    NOTE: despite the name (kept for caller compatibility), this function
    CREATES the directory 'E:/picture/foodiesfeed/<word>' when missing; it
    does not merely test for existence.
    """
    path = 'E:/picture/foodiesfeed/' + word
    # exist_ok=True avoids the check-then-create race of the original
    # `if not os.path.exists(path): os.makedirs(path)` pattern.
    os.makedirs(path, exist_ok=True)

def getimg(keyword,page):
    """Download every cover image on one listing page of a foodiesfeed tag.

    Parameters:
        keyword: tag name (may be URL-quoted; it is unquoted here).
        page:    1-based page number of the tag listing.

    Images are saved as E:/picture/foodiesfeed/<keyword>/<page>/<i>.jpg;
    files that already exist are skipped. Network failures on a single
    image are logged-by-skip and do not abort the page.
    """
    page = str(page)
    keyword = unquote(keyword)
    # Per-page storage directory; exist_ok avoids the check-then-create race.
    path = 'E:/picture/foodiesfeed/' + keyword + '/%s' % page
    os.makedirs(path, exist_ok=True)

    # Listing page for this tag/page combination.
    url = 'https://www.foodiesfeed.com/tag/' + keyword + '/page/' + page
    # Explicit timeout: requests ignores socket.setdefaulttimeout and would
    # otherwise hang forever. Reuse the module-level browser headers.
    r = requests.get(url, headers=headers, timeout=10)
    # Random pause so we do not hammer the server between page fetches.
    time.sleep(random.randint(1, 5))
    soup = BeautifulSoup(r.content, "html.parser")

    # Cover images carry both classes 'cover-img' and 'wp-post-image'.
    all_a = soup.find_all('img', class_='cover-img wp-post-image')

    for i, img_tag in enumerate(all_a):
        imgurl = img_tag.attrs['src']
        print("正在下载：%s" % imgurl)

        try:
            r = requests.get(imgurl, timeout=3)
        except requests.RequestException:
            # Narrow catch: only network errors skip this image; a bare
            # `except:` would also swallow KeyboardInterrupt.
            continue

        path1 = path + '/%s.jpg' % i
        if os.path.exists(path1):
            print("图片已存在")
            continue

        try:
            with open(path1, 'wb') as f:
                f.write(r.content)
            print("success_download")
        except OSError:
            # Disk/permission problems only; report and move on.
            print("something wrong")


if __name__ == '__main__':
    # Tag keyword used in the site's listing URL.
    keyword = input('请输入关键字：')
    # How many listing pages to crawl (converted straight to int).
    page_count = int(input('请输入爬取的页数：'))
    # Make sure the keyword's root download directory exists.
    is_file_exist(keyword)
    # Crawl pages 1..page_count inclusive.
    for current_page in range(1, page_count + 1):
        getimg(keyword, current_page)
