import os
import re

import requests
from bs4 import BeautifulSoup


class Mztu:
    """Scraper for the mzitu.com photo-album site.

    Workflow: list album URLs -> read album metadata (count, title) ->
    enumerate per-picture URLs -> download each picture. A browser-like
    header set with a Referer is sent on every request because the site
    uses hotlink protection.
    """

    def __init__(self):
        # Referer is mandatory or the image CDN rejects the request;
        # the rest makes the client look like a regular browser.
        self.headers = {
            "Accept": "image/webp,image/apng,image/*,*/*;q=0.8",
            # "Accept-Encoding":"gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.8",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36",
            "Connection": "keep-alive",
            "Referer": "http://www.mzitu.com"
        }

    def get_picture_url_list(self, url):
        """Return the album URLs found on one album-listing page.

        :param url: URL of the album-listing page.
        :return: list of album URL strings.
        """
        # BUGFIX: the original sent this request without self.headers,
        # even though __init__ builds them precisely for this site.
        response = requests.get(url, headers=self.headers)
        soup = BeautifulSoup(response.text, 'html.parser')
        # Album links live inside <ul id="pins"> as <span><a href=...>.
        return [a['href'] for a in soup.find('ul', id="pins").select("span a")]

    def get_page_info(self, url):
        """Return album details (picture count, title).

        :param url: album URL.
        :return: tuple (page_size, page_title); page_size is the string
                 shown by the site's pager, not an int.
        """
        response = requests.get(url, headers=self.headers)
        # Parse once — the original built two soups from the same text.
        soup = BeautifulSoup(response.text, 'html.parser')
        # NOTE(review): index 6 is the hard-coded position of the
        # last-page number inside div.pagenavi — fragile if the site's
        # markup changes; confirm against the live page.
        page_size = soup.find('div', class_="pagenavi").find_all("span")[6].string
        page_title = soup.find('title').string
        return page_size, page_title

    def get_picture_url(self, url, page_size):
        """Return the image URL of every page in an album.

        :param url: album URL.
        :param page_size: number of pictures (string or int).
        :return: list of image URLs.
        """
        pages = []
        for index in range(1, int(page_size) + 1):
            # Individual pages are addressed as <album-url>/1, /2, ...
            response = requests.get(url + "/" + str(index), headers=self.headers)
            pages.append(BeautifulSoup(response.text, 'html.parser').find('img')['src'])
        return pages

    def get_file_path(self, file_root, page_size, page_title):
        """Create (if needed) and return the directory for one album.

        :param file_root: root directory, expected to end with '/'.
        :param page_size: number of pictures in the album.
        :param page_title: album title; filesystem-unsafe characters
                           are replaced with '_'.
        :return: album directory path ending with '/'.
        """
        # makedirs(exist_ok=True) replaces the racy exists()+mkdir pair
        # and also creates missing intermediate directories.
        os.makedirs(file_root, exist_ok=True)
        # Strip characters that are illegal in Windows file names.
        dir_name = u"【%sP】%s" % (str(page_size), re.sub(r'[\\/:*?"<>|]', '_', page_title))
        file_path = file_root + dir_name + '/'
        if not os.path.exists(file_path):
            os.makedirs(file_path)
            print("%s 目录创建完成" % dir_name)
        else:
            print("%s 目录已存在" % dir_name)
        return file_path

    def save_picture(self, pages, file_path):
        """Download every image in *pages* into *file_path*.

        Stops at the first file that already exists, on the assumption
        that everything after it was fetched by a previous run.

        :param pages: list of image URLs.
        :param file_path: destination directory (ends with '/').
        """
        for page in pages:
            # The last 9 characters of the URL (e.g. "1/01.jpg") serve
            # as the local file name.
            file_name = page[-9:]
            target = file_path + file_name
            if os.path.exists(target):
                print("%s 文件已存在" % file_name)
                break
            # BUGFIX: check existence BEFORE downloading — the original
            # fetched the image first and then discarded it if the file
            # was already present.
            image = requests.get(page, headers=self.headers)
            # Context manager guarantees the handle is closed even if
            # the write raises.
            with open(target, 'wb') as file_writer:
                file_writer.write(image.content)
            print("%s 文件已下载完成" % file_name)
