# Image scraper for nipic.com (昵图网)
# Example search URL: http://soso.nipic.com/q_%e5%9b%bd%e7%94%bb_g_0.html

import urllib.request
import re
import multiprocessing
import os
import requests
from bs4 import BeautifulSoup
from urllib import parse
from urllib.parse import unquote
from urllib.parse import quote


# Browser-like User-Agent header, presumably so the site serves the normal
# page instead of rejecting the default python-requests client — TODO confirm.
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}
# Placeholder cookie jar; empty in this script (not used by the code below).
cookies = {'cookies': ''}

def is_file_exist(word):
    """Ensure the base download directory for *word* exists.

    NOTE: despite the name, this does not return a boolean — it creates
    the directory ``E:/picture/nipic/<word>`` (decoding any percent-escapes
    in *word* first) if it is missing. Name kept for caller compatibility.
    """
    word = unquote(word)
    path = 'E:/picture/nipic/' + word
    # exist_ok avoids the check-then-create race of the original
    # `if not os.path.exists(path): os.makedirs(path)` pattern.
    os.makedirs(path, exist_ok=True)

def getimg(keyword, page):
    """Download every image on one search-result page for *keyword*.

    Images are saved as ``E:/picture/nipic/<keyword>/<page>/<i>.jpg``.
    *keyword* may be percent-encoded; *page* may be an int or str.
    """
    page = str(page)
    keyword = unquote(keyword)
    path = 'E:/picture/nipic/' + keyword + '/%s' % page
    # exist_ok avoids the check-then-create race.
    os.makedirs(path, exist_ok=True)

    # Search-result URL for this keyword/page.
    url = 'http://soso.nipic.com/?q=' + keyword + '&g=0&page=' + page

    # Pass the module-level browser User-Agent (the original defined
    # `headers` but never sent it, so the site saw the default client UA).
    r = requests.get(url, headers=headers)
    # Parse the HTML response into a navigable tree.
    soup = BeautifulSoup(r.content, "html.parser")

    # Thumbnails are lazy-loaded <img class="lazy"> tags; the real image
    # URL lives in their data-original attribute.
    all_a = soup.find_all('img', class_='lazy')

    for i, tag in enumerate(all_a):
        imgurl = tag.attrs["data-original"]
        # Rewrite the thumbnail URL into its high-resolution variant,
        # keyed on the character before the file extension ("..._4.jpg").
        # Length guard prevents IndexError on unexpectedly short URLs.
        if len(imgurl) >= 5 and imgurl[-5] == '4':
            imgurl = imgurl.replace('/pic/', '/file/')
            imgurl = imgurl.replace('_4.', '_2.')
        elif len(imgurl) >= 5 and imgurl[-5] == '0':
            imgurl = imgurl.replace('/pic/', '/res/')
            imgurl = imgurl.replace('_0.', '_1.')
        print("正在下载：%s" % imgurl)
        try:
            # Fetch the image to <path>/<i>.jpg.
            urllib.request.urlretrieve(imgurl, path + '/%s.jpg' % i)
        except OSError as e:
            # OSError covers urllib.error.URLError/HTTPError and file
            # errors; the original bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit. Best-effort: log and continue.
            print('something wrong', e)


if __name__ == '__main__':
    # Keyword to search for on nipic.com.
    keyword = input('请输入关键字：')
    # Number of result pages to crawl.
    pagenumber = input('请输入爬取的页数：')
    pagenumber = int(pagenumber)
    # Percent-encode the keyword for use in the search URL.
    keyword = urllib.parse.quote(keyword)
    # Make sure the base output folder exists.
    is_file_exist(keyword)
    # Pages are 1-based. The original started at 26 — apparently a
    # leftover resume offset, which made any pagenumber < 26 download
    # nothing at all.
    for page in range(1, pagenumber + 1):
        getimg(keyword, page)
