# -*- coding:utf-8 -*-

import requests
from bs4 import BeautifulSoup
import time
import os
from Download import download

def request(url):
    """Fetch *url* through the project's download helper.

    Returns the response object produced by download.get() — a
    requests-style response exposing .text and .content.
    """
    # Renamed local: the original bound it to `Download`, shadowing the
    # imported `Download` module name.
    downloader = download()
    # Second argument is presumably a retry count — confirm against the
    # Download module.
    return downloader.get(url, 3)

def mkdir(filename):       ##判断文件是否存在的函数
    filename=filename.strip()

    path="e:\\meizitu\\"+filename
    isExists=os.path.exists(path)
    if not isExists:
        print u"新建一个名为：【"+filename+u"】的文件夹"
        os.mkdir(path)
        os.chdir(path)
        return True
    else:
        print u"名字为：【"+filename+u"】已经存在"
        os.chdir(path)
        return False

def a_html(url):
    """Walk every page of one photo set and download each page's image."""
    response = request(url)
    soup = BeautifulSoup(response.text, 'lxml')
    # The second-to-last <span> in the pager carries the last page number.
    last_page = soup.find('div', class_='pagenavi').find_all('span')[-2].get_text()
    page = 1
    while page <= int(last_page):
        img(url + '/' + str(page))
        page += 1

def img(pag_url):
    """Locate the main image on one gallery page and save it to disk."""
    response = request(pag_url)
    soup = BeautifulSoup(response.text, 'lxml')
    image_src = soup.find('div', class_='main-image').find('img')['src']
    save(image_src)

def save(img_url):
    """Download the image at *img_url* into the current directory.

    The file name is taken from the 5 characters preceding the
    extension in the URL (e.g. '.../12345.jpg' -> '12345.jpg').
    """
    name = img_url[-9:-4]
    img = request(img_url)
    # 'wb' instead of the original 'ab': append mode made a re-run of the
    # script append the same bytes again, corrupting the jpg. `with`
    # guarantees the handle is closed even if write() raises.
    with open(name + '.jpg', 'wb') as f:
        f.write(img.content)

def all_html(url):
    html=request(url)
    all_a=BeautifulSoup(html.text,'lxml').find('ul',class_="archives").find_all('a')
    for a in all_a:
        title=a.get_text()
        title = title.replace('?', "")
        print u"开始保存--"+title
        mkdir(title)
        href=a['href']
        a_html(href)

if __name__=="__main__":

    # Entry point: crawl the site's full archive index.
    all_html('http://www.mzitu.com/all')


