# -*- coding: utf-8 -*-
import multiprocessing
import os.path
import requests
from lxml import etree

# Scrape the images from the 10 hottest pages of http://www.mzitu.com/

# Destination directory; one sub-directory per gallery is created under it.
root_dir = r'D:\work\project\spiders\mzitu\result'
# Request headers for fetching images from the i.meizitu.net image host.
# A Referer header is added per image request (see do_one_page) because the
# host rejects image downloads without one.
headers = {
    'Host': 'i.meizitu.net',
    'Pragma': 'no-cache',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) '
                    'Chrome/59.0.3071.115 Safari/537.36',
    'Accept': 'image/webp,image/apng,image/*,*/*;q=0.8',
    # 'Referer': '{}'.format(referer),
}


def do_one_page(url_info):
    """Download every image of a single gallery.

    Parameters
    ----------
    url_info : tuple[str, str]
        ``(gallery_title, gallery_url)`` as produced by
        ``get_all_page_url``.  The title becomes the sub-directory name
        under ``root_dir``.

    A gallery whose directory already exists is treated as done and
    skipped.  Any error while processing one gallery is reported and
    swallowed so the pool worker stays alive for the remaining galleries.
    """
    try:
        # One sub-directory per gallery; an existing directory means the
        # gallery was (at least partially) handled before — skip it.
        dirname = os.path.join(root_dir, url_info[0])
        if os.path.exists(dirname):
            return
        os.makedirs(dirname)

        # The gallery landing page carries the pagination widget; the
        # second-to-last <span> holds the total number of pages.
        r = requests.get(url_info[1], timeout=10)
        tree = etree.HTML(r.text)
        pages_num = int(tree.xpath('//div[@class="pagenavi"]/a/span/text()')[-2])

        # Each page /1, /2, ... shows exactly one full-size image.
        for i in range(1, pages_num + 1):
            url = url_info[1] + '/%d' % i
            rr = requests.get(url, timeout=10)
            t = etree.HTML(rr.text)
            img_url = t.xpath('//div[@class="main-image"]/p/a/img/@src')[0]

            fullname = os.path.join(dirname, os.path.basename(img_url))
            print(fullname)
            # Resume support: keep files that were fully written earlier.
            if os.path.exists(fullname) and os.path.getsize(fullname) != 0:
                continue
            # The image host requires a Referer; build per-request headers
            # instead of mutating the shared module-level dict.
            req_headers = dict(headers, Referer=url)
            with open(fullname, 'wb') as fp:
                fp.write(requests.get(img_url, headers=req_headers, timeout=10).content)
    except Exception as exc:
        # Worker-boundary best-effort: one broken gallery must not kill
        # the pool worker, but report it instead of a bare silent pass.
        print('failed to download %r: %s' % (url_info[0], exc))


def get_all_page_url():
    """Collect ``(title, url)`` pairs for every gallery on the 10 hottest pages.

    Returns
    -------
    list[tuple[str, str]]
        One ``(gallery_title, gallery_url)`` tuple per gallery, in page
        order, suitable as input for ``do_one_page``.
    """
    result = []
    # 'page' instead of reusing 'i' — the original shadowed the outer loop
    # variable with an unused enumerate index.
    for page in range(1, 11):
        url = 'http://www.mzitu.com/hot/page/%d/' % page
        r = requests.get(url, timeout=10)
        tree = etree.HTML(r.text)
        # Each gallery is an <a> under the #pins list: text is the title,
        # href is the gallery landing page.
        anchors = tree.xpath('//ul[@id="pins"]/li/span/a')
        result.extend((a.text, a.attrib['href']) for a in anchors)
    return result


if __name__ == '__main__':
    # Required for frozen Windows executables; a no-op otherwise.
    multiprocessing.freeze_support()
    # Leave one core free for the OS, but always run at least one worker.
    worker_count = max(multiprocessing.cpu_count() - 1, 1)
    worker_pool = multiprocessing.Pool(worker_count)
    worker_pool.map(do_one_page, get_all_page_url())
    # close() + join() lets all queued galleries finish before exiting
    # (a `with Pool(...)` block would terminate() workers instead).
    worker_pool.close()
    worker_pool.join()
