import re
import time

import requests
import base64
import os

from bs4 import BeautifulSoup

# Landing page of the jandan.net "ooxx" (girl-pictures) board; used to read the page count.
base_url = "http://jandan.net/ooxx"
# Per-page URL template; filled with a 1-based page number in check_page().
mei_zi_url = "http://jandan.net/ooxx/page-{}#comments"
# Root directory under which one sub-directory per page is created.
save_path = './images/'
# Mobile Safari user-agent: the mobile site variant is what the selectors below expect.
headers = {
    'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1'
}

# Sub-directory name template, "第{}期" = "issue No. {}".
base_file_name = "第{}期"
# NOTE(review): mutable module-level global — set by check_page() for each page
# and read by down_img(); consider passing it as a parameter instead.
file_name = ""


def get_html(url):
    """Fetch *url* with the module's mobile user-agent and return the body text.

    :param url: page URL to download
    :return: decoded response body as a string
    """
    return requests.get(url, headers=headers).text


def get_img(html):
    """Extract the base64-obfuscated image hashes from a page.

    The site hides image URLs inside ``<span class="img-hash">`` elements;
    this collects the text of every such span.

    :param html: raw HTML of one board page
    :return: list of base64 hash strings, one per image
    """
    document = BeautifulSoup(html, 'lxml')
    hashes = []
    for node in document.select("span.img-hash"):
        hashes.append(node.get_text())
    return hashes


def parse_img(imgs):
    """Decode base64 image hashes into fetchable ``http:`` URLs.

    Each hash decodes to a scheme-relative URL such as ``//host/path.jpg``;
    prefixing ``http:`` makes it absolute.

    :param imgs: iterable of base64-encoded URL strings
    :return: list of absolute ``http:`` URLs
    """
    # base64.b64decode(...).decode('utf-8') is the idiomatic form of the
    # original bytes.decode(base64.b64decode(...), encoding='utf-8').
    return ['http:' + base64.b64decode(img).decode('utf-8') for img in imgs]


def down_img(url):
    """Download one image into ``save_path/file_name/``.

    The file name is taken from the last path segment of *url*.
    Reads the module-level globals ``save_path`` and ``file_name``.

    :param url: absolute image URL
    :return: None
    """
    # Timeout so a stalled server can't hang the whole crawl forever.
    req = requests.get(url, timeout=30)
    img_name = url[url.rfind('/') + 1:]
    file = save_path + file_name
    # makedirs(exist_ok=True) fixes two defects of the original
    # exists()+mkdir() pair: it creates the missing parent directory
    # ('./images/') on first run, and it is free of the check-then-create race.
    os.makedirs(file, exist_ok=True)
    with open(file + '/' + img_name, 'wb') as f:
        f.write(req.content)


def start_download(imgs):
    """Download every image URL in *imgs*, one at a time.

    :param imgs: iterable of absolute image URLs
    :return: None
    """
    for image_url in imgs:
        down_img(image_url)


def get_page_count(url):
    """Return the total number of pages on the board.

    The mobile page shows the current/total page as ``[N]`` inside
    ``<span class="current-comment-page">``; the bracketed number on the
    landing page is the highest page index.

    :param url: board landing-page URL (``base_url``)
    :return: total page count as an int
    :raises IndexError: if the expected ``[N]`` marker is not found
    """
    req = requests.get(url, headers=headers)
    soup = BeautifulSoup(req.content, 'lxml')
    span = soup.select_one("span.current-comment-page")
    # Raw string for the regex: the original '\[(\d*)\]' relied on invalid
    # escape sequences in a plain string (DeprecationWarning, and a syntax
    # error in future Python versions).
    str_count = re.findall(r'\[(\d*)\]', span.get_text())[0]
    return int(str_count)


def check_page(count):
    """Crawl pages 1..*count*, downloading every image on each page.

    Updates the module-level global ``file_name`` (the per-page save
    directory name) before downloading, and sleeps 1 s between pages to
    be polite to the server.

    :param count: total number of pages to crawl
    :return: None
    """
    global file_name
    for page_no in range(1, count + 1):
        file_name = base_file_name.format(page_no)
        print(f'开始爬取到第{page_no}页')
        print(file_name)
        html = get_html(mei_zi_url.format(page_no))
        urls = parse_img(get_img(html))
        start_download(urls)
        print('休眠1s')
        time.sleep(1)


def main():
    """Entry point: discover the page count, then crawl every page."""
    count = get_page_count(base_url)
    check_page(count)


# Guarding the entry point keeps the crawl from starting as a side effect
# of merely importing this module.
if __name__ == "__main__":
    main()
