# -*- coding: UTF-8 -*-
import requests
import time
import sys
import os
from bs4 import BeautifulSoup

# https://www.mzitu.com/hot/page/2/
URL = 'https://www.mzitu.com/hot/'
IMAGE_PATH = sys.path[0] + "/image"
HEADER = {
    "User-Agent": "Mozilla/6.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/66.0"
}

HEADER_2 = {
    "User-Agent": "Mozilla/6.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/66.0",
    "Referer": "https://www.mzitu.com/",
    "Sec-Fetch-Mode": "no-cors"
}


def create_dir(name):
    """Create directory *name* (including parents) if it does not exist.

    Uses ``exist_ok=True`` instead of an ``os.path.exists`` pre-check,
    which avoids the check-then-create race condition and is idempotent.
    """
    os.makedirs(name, exist_ok=True)


def download_cover(url, save_path, data_time):
    """Download the gallery cover image at *url* into *save_path*.

    The file is saved as ``cover_<data_time>.jpg``.  A non-200 response is
    reported and skipped instead of being written to disk (the original
    wrote the error body as a .jpg).  Sleeps 1s afterwards to throttle
    requests against the server.
    """
    cover_image = requests.get(url, headers=HEADER_2)
    if cover_image.status_code != 200:
        print("download cover %s failed, status code = %d" % (url, cover_image.status_code))
        return
    file = os.path.join(save_path, 'cover_{}.jpg'.format(data_time))
    with open(file, 'wb') as f:
        f.write(cover_image.content)
    # Sleep outside the `with` so the file handle is not held open
    # for the whole throttle delay.
    time.sleep(1)


def download_detail(url, save_path, index=1):
    """Download all detail images of one gallery into *save_path*.

    Fetches ``<url>/<index>`` pages one by one, saving each main image as
    ``mzi_<index>.jpg``, until a non-200 response or a page without the
    expected main-image markup is reached.

    Iterative rather than recursive: long galleries previously risked
    hitting Python's recursion limit, and a page missing the
    ``main-image`` div or its ``img`` tag raised AttributeError/KeyError.
    """
    while True:
        # 内部详情图 (inner detail image page)
        final_url = "{}/{}".format(url, str(index))
        detail_response = requests.get(final_url, headers=HEADER)
        if detail_response.status_code != 200:
            print("download %s failed, status code = %d" % (final_url, detail_response.status_code))
            return

        detail_container = BeautifulSoup(detail_response.text, 'lxml')
        main_image_div = detail_container.find('div', class_='main-image')
        # Stop cleanly when the page no longer has the expected layout
        # (end of gallery or site markup change).
        if main_image_div is None:
            return
        img_tag = main_image_div.find('img')
        if img_tag is None or not img_tag.has_attr('src'):
            return

        image_response = requests.get(img_tag['src'], headers=HEADER_2)
        file = os.path.join(save_path, 'mzi_{}.jpg'.format(str(index)))

        print("第%d张图片..." % (index))
        with open(file, 'wb') as f:
            f.write(image_response.content)
        # Throttle between images; outside the `with` so the handle closes first.
        time.sleep(0.2)

        index += 1


def download_page(index):
    """Crawl listing pages starting at *index*, up to page 5 inclusive.

    For each gallery on a page: downloads the cover and then all detail
    images into ``IMAGE_PATH/meizi_<page>_<n>``.

    Fixes over the original:
    - The ``index > 5`` cap was checked AFTER the request and shared the
      failure branch, so page 6 was still fetched and a misleading
      "failed, status code = 200" message was printed.  The cap is now
      checked before any request is made.
    - Iterative instead of recursive (no recursion-depth growth per page).
    - Guards against a missing ``ul#pins`` container or ``span.time``.
    """
    while index <= 5:
        print("正在获取第%d页的妹子信息...." % (index))
        page_url = "{}/page/{}/".format(URL, index)
        response = requests.get(page_url, headers=HEADER)
        if response.status_code != 200:
            print("download %s failed, status code = %d" % (page_url, response.status_code))
            return
        soup = BeautifulSoup(response.text, 'lxml')
        content = soup.find('ul', id='pins')
        # Site markup changed or page is empty: stop rather than crash.
        if content is None:
            return

        for meizi_index, pic in enumerate(content.find_all('li'), start=1):
            cover_container = pic.find('img')
            cover = cover_container['data-original']
            data_time = pic.find('span', class_='time')
            time_text = data_time.text if data_time is not None else ''
            # 二级目录 (second-level gallery URL)
            sub_url = pic.find('a')['href']
            # 创建文件夹 (create the per-gallery folder)
            path = os.path.join(IMAGE_PATH, "meizi_{}_{}".format(index, meizi_index))
            create_dir(path)
            print("正在下载第%d位妹子的图片...." % (meizi_index))
            download_cover(cover, path, time_text)
            download_detail(sub_url, path, 1)

        index += 1


if __name__ == '__main__':
    # Entry point: ensure the base image directory exists, then start
    # crawling from the first listing page.
    create_dir(IMAGE_PATH)
    download_page(1)
