# -*- coding: UTF-8 -*-
import requests
import time
import sys
import os
from bs4 import BeautifulSoup

# https://www.mzitu.com/hot/page/2/
# Listing-page URL template; {} is filled with a 1-based page number.
URL = 'https://www.mzitu.com/zipai/comment-page-{}'
# Images are saved next to the script, under image/zipai.
IMAGE_PATH = sys.path[0] + "/image/zipai"
# Headers used when fetching HTML listing/detail pages.
HEADER = {
    "User-Agent": "Mozilla/6.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100111 Firefox/66.0"
}

# Headers used when fetching image files; the Referer is presumably
# required by the site's anti-hotlinking check — TODO confirm.
HEADER_2 = {
    "User-Agent": "Mozilla/6.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100201 Firefox/66.0",
    "Referer": "https://www.mzitu.com/",
    "Sec-Fetch-Mode": "no-cors"
}


def create_dir(name):
    """Create directory *name* (including parents) if it does not exist.

    Uses ``exist_ok=True`` instead of the original exists()-then-makedirs
    pattern, which had a check-then-act race (another process could create
    the directory between the two calls and crash makedirs).
    """
    os.makedirs(name, exist_ok=True)


def download_cover(url, save_path, data_time):
    """Download a cover image from *url* into *save_path* as
    ``cover_<data_time>.jpg``.

    Fixes vs. original: the HTTP status is checked before writing (the
    original happily saved an error page as a .jpg), and the throttle
    sleep happens after the file is closed instead of inside the ``with``
    block, so the handle is not held open while sleeping.
    """
    cover_image = requests.get(url, headers=HEADER_2)
    if cover_image.status_code != 200:
        print("download %s failed, status code = %d" % (url, cover_image.status_code))
        return
    file = os.path.join(save_path, 'cover_{}.jpg'.format(data_time))
    with open(file, 'wb') as f:
        f.write(cover_image.content)
    # Throttle between requests to stay polite to the server.
    time.sleep(0.2)


def download_detail(url, save_path, index):
    """Download a gallery's detail images, starting from page *index*.

    Walks the gallery page by page until a page fails to load or has no
    main image. The original implementation recursed once per image,
    which hits Python's recursion limit (RecursionError) on long
    galleries; this version iterates instead. It also guards against a
    missing ``div.main-image`` / ``img`` tag, which previously raised an
    unhandled AttributeError.

    :param url: base gallery URL; page URLs are ``<url>/<index>``
    :param save_path: directory the images are written into
    :param index: 1-based page number to start from
    """
    while True:
        final_url = "{}/{}".format(url, index)
        detail_response = requests.get(final_url, headers=HEADER)
        if detail_response.status_code != 200:
            print("download %s failed, status code = %d" % (final_url, detail_response.status_code))
            return

        detail_container = BeautifulSoup(detail_response.text, 'lxml')
        main_image_div = detail_container.find('div', class_='main-image')
        if main_image_div is None:
            # Gallery ended or the page layout changed — stop cleanly.
            return
        img_tag = main_image_div.find('img')
        if img_tag is None or 'src' not in img_tag.attrs:
            return
        image_response = requests.get(img_tag['src'], headers=HEADER_2)
        file = os.path.join(save_path, 'zz_{}.jpg'.format(index))

        print("第%d张图片..." % (index))
        with open(file, 'wb') as f:
            f.write(image_response.content)
        # Throttle between images.
        time.sleep(1)
        index += 1


def download_page(index, meizi_index):
    """Download all lazy-loaded images from listing page *index* and the
    following pages, up to and including page 3.

    Fixes vs. original:
    - The ``index > 3`` stop-guard ran *after* ``requests.get``, so page 4
      was always fetched and then thrown away; the guard now runs first.
    - Page-to-page recursion is replaced with a loop, avoiding
      RecursionError if the page limit is ever raised.

    :param index: 1-based listing page number to start from
    :param meizi_index: running counter used to name the saved files
    """
    while index <= 3:
        print("正在获取第%d页的妹子信息...." % (index))
        page_url = URL.format(index)
        response = requests.get(page_url, headers=HEADER)
        if response.status_code != 200:
            print("download %s failed, status code = %d" % (page_url, response.status_code))
            return

        soup = BeautifulSoup(response.text, 'lxml')
        for pic in soup.findAll('img', class_="lazy"):
            image_url = pic['data-original']
            image_response = requests.get(image_url, headers=HEADER_2)
            file = os.path.join(IMAGE_PATH, 'mzi_{}.jpg'.format(meizi_index))
            print("正在下载第%d位妹子图片..." % (meizi_index))
            with open(file, 'wb') as f:
                f.write(image_response.content)
            # Long throttle between image downloads to avoid being blocked.
            time.sleep(5)
            meizi_index = meizi_index + 1

        index += 1


if __name__ == '__main__':
    # Make sure the destination directory exists, then start crawling
    # from listing page 1 with the image counter at 1.
    create_dir(IMAGE_PATH)
    download_page(1, 1)
