# -*- coding: utf-8 -*-
"""
@Time   : 2019/11/24 21:11
@Author : LeeCQ
"""
# NOTE: the shebang and coding declaration are only honored on the first
# lines of a file; the encoding is already declared at the top, so the
# duplicate lines that used to sit here were inoperative and were removed.

import requests
import re
import os
from bs4 import BeautifulSoup


# Browser-like default headers used for every HTTP request in this module.
# NOTE(review): other code in this module may add request-specific keys
# (e.g. Host/Referer) before sending.
HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
    }


class Mzitu(object):
    """Scraper for www.mzitu.com.

    Collects the URL of every photo album from the site index, extracts
    each album's image URLs plus metadata, and saves the images under
    ``SAVE_PATH/images/<album title>/``.
    """

    URL = 'http://www.mzitu.com/all/'  # index page listing every album
    SAVE_PATH = './'                   # root directory for downloads

    def __init__(self):
        # Directory for auxiliary/support files (e.g. cached URL lists).
        if not os.path.exists('./_sup/'):
            os.mkdir('./_sup/')

    def base_open(self, url):
        """GET *url* and return the parsed page as a BeautifulSoup object.

        Builds a per-request copy of HEADERS instead of mutating the shared
        module-level dict: the previous code wrote the ``Host`` key into the
        global, leaking state between page requests and image downloads.

        :param url: page URL on www.mzitu.com.
        :return: ``BeautifulSoup`` of the response body.
        """
        headers = dict(HEADERS)
        headers['Host'] = 'www.mzitu.com'
        response = requests.get(url=url, headers=headers, timeout=3)
        return BeautifulSoup(response.text, 'lxml')

    def pic_lists(self):
        """Return the list of album URLs found on the site index page."""
        soup = self.base_open(self.URL)
        return [a.get('href') for a in soup.select('p.url > a')]

    def pic_urls(self, pic_list_url):
        """Collect every image URL and the metadata of one album.

        :param pic_list_url: URL of the album's first page.
        :return: dict with keys ``idd``, ``title``, ``times``, ``tags``
                 and ``images`` (list of image URLs).
        """
        soup = self.base_open(pic_list_url)
        # Raw string: '\d' is an invalid escape in a plain string literal.
        # Guard against URLs without digits instead of crashing on .group().
        match = re.search(r'\d+', pic_list_url)
        idd = match.group() if match else ''
        title = soup.select('h2.main-title')[0].text  # album title
        tags = soup.select('div.main-meta > span')[0].text.replace('分类：', '').strip()  # category
        times = soup.select('div.main-meta > span')[1].text.replace('发布于', '').strip()  # publish date
        pagenums = soup.select('div.pagenavi > a')[-2].text  # last page number
        # One URL per album page, then one image per page.
        page_urls = [pic_list_url + '/' + str(num) for num in range(1, int(pagenums) + 1)]
        images = [self.base_open(url).select('div.main-image > p > a > img')[0].get('src')
                  for url in page_urls]
        return {
            'idd': idd,
            # Replace characters that are illegal in Windows file names.
            'title': re.sub(r'[\/:*?"<>|]', '-', title),
            'times': times,
            'tags': tags,
            'images': images,
        }

    def sava_images(self, data):
        """Download and save all images of one album.

        Skips the album entirely when its target directory already exists
        (treated as "already downloaded"). Uses a per-request header copy
        rather than mutating the module-level HEADERS dict.

        :param data: dict produced by :meth:`pic_urls`.
        """
        headers = dict(HEADERS)
        # Presumably anti-hotlink checks on the image CDN: Host points at
        # the CDN, Referer back at the site — TODO confirm still required.
        headers['Host'] = 'i5.meizitu.net'
        headers['Referer'] = 'http://www.mzitu.com/'
        file = self.SAVE_PATH + 'images' + '/' + data['title']
        if not os.path.exists(file):
            os.makedirs(file)
            for img in data['images']:
                imgname = img.split('/')[-1]
                content = requests.get(img, headers=headers, timeout=3).content
                with open(file + '/' + imgname, 'wb') as fp:
                    fp.write(content)
            print('%s 套图已保存..' % data['title'])

    def run(self):
        """Crawl everything: list albums -> extract each -> save images."""
        for pic_list_url in self.pic_lists():
            self.sava_images(self.pic_urls(pic_list_url))


if __name__ == '__main__':
    # Script entry point: build a scraper instance and crawl everything.
    Mzitu().run()
