#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName  :1.0.4 秀色女神.py
# @Time      :2020/7/17 19:49
# @Author    :亮亮
# @说明       :网站秀色女神网站照片爬取
# @总结       :
import requests
from fake_useragent import UserAgent
from lxml import etree
import os
import multiprocessing
from urllib.request import Request, urlopen  # 构建get_html1函数
import ssl  # 构建get_html1函数


# def get_html1(url):
#     """
#     获取一级网页内容info
#     :param url: 一级网页url
#     :return: 网页内容
#     """
#     r = Request(url)
#
#     # 忽略证书
#     context = ssl._create_unverified_context()
#     response = urlopen(r, context=context)
#
#     info = response.read().decode()
#
#     return info


def get_html1(url):
    """
    Fetch a first-level (listing) page.

    :param url: page URL to request
    :return: requests.Response for the page
    """
    # Random User-Agent plus a fixed album Referer so the site serves the page.
    request_headers = {
        "User-Agent": UserAgent().random,
        "Referer": "https://www.xsnvshen.com/album/32246",
    }

    # verify=False: skip TLS certificate checks, matching the original behaviour.
    return requests.get(url, headers=request_headers, verify=False)


def get_html2(url):
    """
    Fetch a second-level (album) page.

    :param url: album page URL
    :return: requests.Response for the page
    """
    # Only a random User-Agent is needed for album pages (no Referer).
    ua_headers = {"User-Agent": UserAgent().random}

    # verify=False: skip TLS certificate checks, matching the original behaviour.
    return requests.get(url, headers=ua_headers, verify=False)


def re_html_1(info):
    """
    Parse the listing (first-level) page.

    :param info: HTML text of the listing page
    :return: tuple ``(title_names, img_urls, img_numbers)`` where
             ``title_names`` are album titles,
             ``img_urls`` are absolute album-page URLs,
             ``img_numbers`` are per-album photo counts (as strings from xpath).
    """
    from urllib.parse import urljoin  # local import: only needed here

    # 1. Build the parse tree.
    e = etree.HTML(info)

    # 2. Extract titles, photo counts and album hrefs.
    title_names = e.xpath('//div[@class="camLiCon"]/div/p/a/text()')
    img_numbers = e.xpath('//li[@class="min-h-imgall_300"]/a/div/span[@class="num"]/text()')
    add_names = e.xpath('//li[@class="min-h-imgall_300"]/a/@href')

    # 3. Build absolute album URLs. urljoin avoids the double slash that
    #    naive concatenation produces when the href already starts with "/".
    img_urls = [urljoin("https://www.xsnvshen.com/", add_name) for add_name in add_names]

    return title_names, img_urls, img_numbers


def re_html_2(info2, img_numbers):
    """
    Build the per-photo URL list from an album (second-level) page.

    The page exposes one sample image (id="bigImg") whose ``src`` ends in a
    3-digit zero-padded index plus ".jpg" (e.g. ".../000.jpg" — 7 characters),
    and the ``src`` is protocol-relative (starts with "//"). All photos share
    the same prefix, so the list is rebuilt by substituting the index.

    :param info2: HTML text of the album page
    :param img_numbers: number of photos in the album (int or numeric string)
    :return: list of absolute image URLs
    """
    # 1. Build the parse tree.
    e = etree.HTML(info2)

    # 2. Grab the sample image src.
    old_img_url = e.xpath('//img[@id="bigImg"]/@src')[0]

    # 3. Drop the trailing "NNN.jpg" (7 chars) and rebuild each filename.
    #    The original code appended "0{}.img", which produced 2-digit names
    #    with a bogus ".img" extension for i < 10; real files are
    #    3-digit ".jpg" (e.g. "010.jpg").
    prefix = "https:" + old_img_url[:-7]
    return ["{}{:03d}.jpg".format(prefix, i) for i in range(int(img_numbers))]


def main2(url):
    """
    Download every album listed on one listing page.

    :param url: listing-page URL
    :return: None
    """
    # Fetch and parse the listing page.
    info = get_html1(url).text
    title_names, img_urls, img_numbers = re_html_1(info)

    # Process pool for concurrent picture downloads. The original also
    # created a Manager().Queue() that was written but never read; dropped.
    po = multiprocessing.Pool(5)

    for name_number, img_url in enumerate(img_urls):
        # Fetch the album page.
        response = get_html2(img_url)

        # Progress message for this album.
        print("----二级标题：{}----".format(title_names[name_number]))
        info2 = response.text

        # Per-photo URLs for this album.
        img_urls_list = re_html_2(info2, img_numbers[name_number])

        # NOTE(review): save_picture is not defined anywhere in this file —
        # confirm it is provided elsewhere before running; apply_async would
        # otherwise swallow the NameError silently.
        for number, picture_url in enumerate(img_urls_list):
            po.apply_async(save_picture, args=(picture_url, title_names[name_number], number))

    # Wait for all queued downloads to finish. Without close()/join() the
    # pool could be torn down while async tasks are still pending.
    po.close()
    po.join()


def main():
    """
    Entry point: ask how many listing pages to scrape, then scrape each one.
    """
    # 1. Number of listing pages to scrape (exclusive upper bound for range).
    papers = int(input("输入要爬的页数：")) + 1

    # 2. Build every listing-page URL.
    base_url = "https://www.xsnvshen.com/album/?p={}"
    list_url = [base_url.format(page) for page in range(1, papers)]
    print(list_url)

    # 3. Scrape each page in turn, reporting 1-based progress.
    for page_index, url in enumerate(list_url, start=1):
        print("----正在下载第{}页----".format(page_index))
        main2(url)

    print("全部完成")


if __name__ == '__main__':
    main()
    # The two lines below were pasted as bare text and made the file a
    # SyntaxError; kept here as a comment instead. Example of a full-size
    # image URL this scraper builds:
    # https://img.xsnvshen.com/thumb_600x900/album/20763/24036/010.jpg
