#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by master on 2017/3/13
import re

from bs4 import BeautifulSoup

import requests

import os


class Luoo(object):
    """Scraper for luoo.net that walks the tag listing pages and downloads
    each volume's cover image, resuming from the last page it finished."""

    # Browser-like User-Agent so the site does not block the scraper.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64)'
                      ' AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/44.0.2403.157 UBrowser/5.5.9703.2'
                      ' Safari/537.36'}

    @staticmethod
    def make_soup(html_text):
        """Parse an HTML string into a BeautifulSoup tree (lxml parser)."""
        return BeautifulSoup(html_text, "lxml")

    @staticmethod
    def request_content(url, head=headers):
        """GET *url* with the browser User-Agent and return the Response.

        BUG FIX: the second positional argument of ``requests.get`` is
        ``params``, not ``headers`` — the original call never actually sent
        the User-Agent. It must be passed via the ``headers=`` keyword.
        """
        return requests.get(url, headers=head)

    @staticmethod
    def _enter_dir(base, name):
        """Create ``base/name`` if missing and chdir into it.

        *name* is sanitized first: characters Windows forbids in directory
        names are replaced with '#'.
        """
        safe = re.sub(r'[\\/:*?"<>]', '#', name).strip()
        p = os.path.join(base, safe)
        if not os.path.exists(p):  # skip creation when it already exists
            os.makedirs(p)
        os.chdir(p)  # downloads are written relative to the cwd

    @staticmethod
    def make_dir(path):
        """Create (if needed) and switch into ``E:\\Luoo/<path>``."""
        Luoo._enter_dir(r"E:\Luoo", path)

    @staticmethod
    def make_default_dir(title):
        """Create (if needed) and switch into ``E:\\meizitu/<title>``."""
        Luoo._enter_dir(r"E:\meizitu", title)

    @staticmethod
    def save_img(name, img):
        """Write the body of Response *img* to file *name*; skip existing files.

        img: a requests Response whose ``.content`` holds the image bytes.
        """
        target = os.path.abspath(name)
        if os.path.exists(target):
            print("[%s]  无需重复保存!" % name)
            return
        # The file is known not to exist, so a plain binary write suffices.
        with open(name, "wb") as f:
            f.write(img.content)
        print("[%s] 已成功保存到: %s" % (name, target))

    def save_current_page(self, page):
        """Persist the last crawled page number so a later run can resume."""
        with open("current.txt", "w") as f:
            f.write(str(page))

    def get_current_page(self):
        """Return the page number saved by a previous run, or 1 when there is
        no usable checkpoint file."""
        try:
            with open("current.txt", "r") as f:
                return int(f.read())
        # Narrowed from bare Exception: only a missing/unreadable file or
        # non-integer content should fall back to page 1.
        except (OSError, ValueError):
            return 1

    def start(self, start_url, limit):
        """Crawl listing pages and download every volume cover image.

        start_url: page used to discover the site's total page count.
        limit: maximum number of listing pages to crawl; capped at the real
            page count advertised by the site's paginator.
        """
        first_page = "http://www.luoo.net/tag/?p="
        self.make_dir("Luoo")
        start_soup = self.make_soup(self.request_content(start_url).text)
        page_links = start_soup.find("div", class_="paginator").find_all("a", class_="page")
        # The last pager link carries the highest page number.
        max_page = page_links[-1].get_text()
        limit = min(int(limit), int(max_page))
        for page in range(self.get_current_page(), limit + 1):
            print("-------第[%s]页开始-------" % page)
            page_soup = self.make_soup(self.request_content(first_page + str(page)).text)
            for item in page_soup.find("div", class_="vol-list").find_all("div", class_="item"):
                src = item.find("a", class_="cover-wrapper").find("img")["src"]
                image_url = str(src).split("?")[0]  # drop the resize query string
                name = image_url.rsplit("/", 1)[-1]  # file name = last path segment
                self.save_img(name, self.request_content(image_url))
            self.save_current_page(page)  # checkpoint so a rerun resumes here
            print("-------第[%s]页结束-------" % page)


if __name__ == '__main__':
    # Entry point: crawl up to the first 20 listing pages of luoo.net.
    entry_url = "http://www.luoo.net/music"
    crawler = Luoo()
    crawler.start(entry_url, 20)
