#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import bs4
from bs4 import BeautifulSoup
import requests
import sys
import importlib

# NOTE(review): reload(sys) was a Python-2 trick to reset the default
# encoding; on Python 3 it is a no-op for that purpose. Kept only to avoid
# changing import-time behavior — safe to delete.
importlib.reload(sys)

# Fake browser headers so the site does not reject the crawler.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36'}

# Archive page listing every photo set.
mzitu = "http://www.mzitu.com/all/"
# Root folder where downloads are stored.
save_path = '/mzt/all/'


# 创建文件夹的方法
def creatDict(filePath):
    """Ensure directory *filePath* exists (creating parents) and chdir into it.

    Args:
        filePath: absolute or relative directory path; intermediate
            directories are created as needed.
    """
    # exist_ok=True is atomic w.r.t. concurrent creation, replacing the
    # racy check-then-create (``if os.path.exists(...) is False``) pattern.
    os.makedirs(filePath, exist_ok=True)
    # Make it the current working directory so downloads land here.
    os.chdir(filePath)


def createOneDict(filePath):
    """Create single-level directory *filePath* if missing and chdir into it.

    Unlike :func:`creatDict`, this does not create parent directories.

    Args:
        filePath: directory name/path whose parent must already exist.
    """
    # os.mkdir has no exist_ok parameter, so keep an explicit guard;
    # ``not`` replaces the non-idiomatic ``is False`` comparison.
    if not os.path.exists(filePath):
        os.mkdir(filePath)
    # Switch into the (possibly pre-existing) directory.
    os.chdir(filePath)


def main():
    """Crawl the mzitu archive page and download every photo set.

    For each album linked on the archive page, creates a sub-folder named
    after the album's URL id under ``save_path`` and saves every image of
    that album into it.  Uses the module-level ``headers`` and the URL /
    path constants; performs only network and filesystem I/O.
    """
    homePage = mzitu

    # Create the root download folder and chdir into it.
    creatDict(save_path)

    # Fetch the archive page that links to all photo sets.
    # (requests.get forwards to Session.request: params/data/headers/...
    # all default to None; we only supply headers.)
    response = requests.get(homePage, headers=headers)
    soup = BeautifulSoup(response.text, 'html.parser')
    all_a = soup.find('ul', class_='archives').find_all("a", target="_blank")
    count = 0
    for a in all_a:
        # BUG FIX: original did ``count += count`` which doubles the counter
        # (1, 2, 4, 8, ...) instead of incrementing it, breaking both the
        # progress output and the first-album check below.
        count += 1
        href_item = a.attrs['href']
        print("这是" + str(count) + "第个图的链接：" + href_item)
        # The last path segment of the album URL is its id; use it as the
        # per-album folder name.
        href_str = href_item.split("/")[-1]
        # From the second album on, we are inside the previous album's
        # folder, so return to the root folder before creating the next one.
        # (Drop these two lines to put everything in a single folder.)
        if count != 1:
            os.chdir(save_path)
        createOneDict(href_str)
        # Open the album's landing page.
        res_item = requests.get(href_item, headers=headers)
        soup_item = BeautifulSoup(res_item.text, 'html.parser')
        try:
            # The 7th <span> inside the pagination block holds the page
            # count of the album — assumed from the site's markup; a layout
            # change lands in the except below.
            max_no = soup_item.find('div', class_='pagenavi').find_all('span')[6].text
            for i in range(1, int(max_no) + 1):
                # Page 1 is the album URL itself; later pages append "/<i>".
                suffix = '/'
                if i != 1:
                    suffix += str(i)
                href = href_item + suffix
                pic_html = requests.get(href, headers=headers)
                soup_pic = BeautifulSoup(pic_html.text, 'html.parser')
                pic_url = soup_pic.find('div', class_='main-image').find('img')
                if isinstance(pic_url, bs4.element.Tag):
                    src = pic_url.attrs['src']
                    print("src:" + src)
                    # The last URL segment is the image file name.
                    file_name = src.split('/')[-1]
                    # BUG FIX: original rebound the *global* ``headers`` to
                    # {'Referer': href}, losing the User-Agent for every
                    # later request. Build a per-request copy instead; the
                    # Referer defeats the site's hot-link protection.
                    img_headers = dict(headers, Referer=href)
                    img_res = requests.get(src, headers=img_headers)
                    print("**下载图片" + file_name)
                    # BUG FIX: 'wb' instead of 'ab' — append mode corrupts
                    # images on a re-run; ``with`` guarantees the handle is
                    # closed even if write fails.
                    with open(file_name, 'wb') as f:
                        f.write(img_res.content)
        except Exception as e:
            # Best effort: log and skip an album whose markup doesn't match.
            print(e)


# Script entry point: run the crawler only when executed directly,
# not when imported as a module.
if __name__ == '__main__':
    main()
