#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by master on 2017/3/13
import re

import requests

from bs4 import BeautifulSoup

import os
import pickle


class MeiziTu(object):
    """Scraper for mzitu.com photo galleries.

    Downloads every gallery linked from an index page into a per-gallery
    folder under E:\\meizitu, and keeps two bookkeeping files in the
    current working directory: ``current_page.txt`` (last page crawled)
    and ``parsed_url.text`` (pickled set of already-crawled links).
    """

    # Browser-like User-Agent so the site does not block the crawler.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64)'
                      ' AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/44.0.2403.157 UBrowser/5.5.9703.2'
                      ' Safari/537.36'}

    @staticmethod
    def make_soup(html_text):
        """Build a BeautifulSoup tree from HTML text (stdlib html.parser)."""
        return BeautifulSoup(html_text, "html.parser")

    @staticmethod
    def request_content(url, head=headers):
        """GET *url* and return the requests.Response.

        BUG FIX: the original called ``requests.get(url, head)``, which
        passes the headers dict as the positional *params* argument
        (query string), so the User-Agent was never actually sent.
        Headers must be passed via the ``headers=`` keyword.
        """
        return requests.get(url, headers=head)

    @staticmethod
    def make_dir(title):
        """Create (if needed) E:\\meizitu\\<sanitized title> and chdir into it.

        The title is sanitized by replacing every character Windows
        forbids in folder names (the original regex was missing ``|``).
        """
        safe_name = re.sub(r'[\\/:*?"<>|]', '#', title).strip()
        path = os.path.join(r"E:\meizitu", safe_name)
        # exist_ok: creating an already-existing folder is not an error.
        os.makedirs(path, exist_ok=True)
        os.chdir(path)  # subsequent save_img calls write into this folder

    @staticmethod
    def save_img(name, img):
        """Write ``img.content`` (binary image data) to *name* in the cwd.

        Skips the write when a file of that name already exists.
        BUG FIX: the original opened with "ab" (append); after the
        existence check a plain "wb" truncate-write is correct — append
        mode would grow a corrupt file if a partial download was left
        behind by an earlier crash.
        """
        target = os.path.abspath(name)
        if os.path.exists(target):
            print("[%s] 重复任务，自动跳过!" % name)
            return
        with open(name, "wb") as f:
            f.write(img.content)
        print("[%s] 已成功保存到: [%s]" % (name, target))

    def save_current_page(self, page):
        """Persist the last crawled page number to current_page.txt."""
        with open("current_page.txt", "w") as f:
            f.write(str(page))

    def get_current_page(self, default=None):
        """Return the page number saved by save_current_page.

        Returns *default* when the file is missing or unreadable.
        BUG FIX: the original returned the global ``all_url`` (defined
        only under ``__main__``), which raises NameError when this class
        is used as a library; the broad ``except Exception`` also hid
        corrupt file contents.
        """
        try:
            with open("current_page.txt", "r") as f:
                return int(f.read())
        except (FileNotFoundError, ValueError):
            return default

    def save_parsed_url(self, url):
        """Add *url* to the persisted set of already-crawled links.

        BUG FIX: the original rebound ``url = {url}`` and then, in the
        FileNotFoundError branch, wrapped that set in *another* set
        literal — ``{url}`` with ``url`` already a set raises
        ``TypeError: unhashable type: 'set'`` on the very first call.
        """
        try:
            with open("parsed_url.text", "rb") as f:
                known = set(pickle.load(f))
        except FileNotFoundError:
            known = set()
        known.add(url)
        with open("parsed_url.text", "wb") as f:
            pickle.dump(known, f)

    def url_exist(self, url):
        """Return True if *url* was previously recorded by save_parsed_url."""
        try:
            with open("parsed_url.text", "rb") as f:
                # NOTE: pickle.load is unsafe on untrusted data; this file
                # is only ever written locally by save_parsed_url.
                return url in pickle.load(f)
        except FileNotFoundError:
            return False

    def start(self, start_url, page=2):
        """Crawl every gallery linked from *start_url*.

        For each <a> on the index page, creates a folder from the link
        text and downloads the main image of gallery pages 1..page-1.
        """
        index_soup = self.make_soup(self.request_content(start_url).text)
        for link in index_soup.find_all("a"):
            title = link.get_text()   # gallery title (becomes folder name)
            href = link["href"]       # gallery URL
            print("-------套图[%s][%s]开始-------" % (href, title))
            self.make_dir(title)
            # (the original fetched `href` here into an unused soup and
            #  left a debug print of every page — both removed)
            # `page_no` instead of reusing `page`: the original shadowed
            # the parameter with its own loop variable.
            for page_no in range(1, page):
                page_url = "%s/%d" % (href, page_no)
                page_soup = self.make_soup(self.request_content(page_url).text)
                image_url = page_soup.find("div", class_="main-image").find("img")["src"]
                name = image_url.split("/")[-1]  # file name = last URL segment
                self.save_img(name, self.request_content(image_url))
                self.save_current_page(page_no)
            print("-------套图[%s][%s]结束-------" % (href, title))


if __name__ == '__main__':
    # Index page listing all galleries.  NOTE: the name `all_url` is a
    # module global that get_current_page falls back to — keep it.
    all_url = 'http://www.mzitu.com/all'

    # Crawl up to 2 pages per gallery (range(1, 3)).
    crawler = MeiziTu()
    crawler.start(all_url, 3)
