#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName  :1.0.5 秀色女神网图片爬虫.py
# @Time      :2020/7/18 12:35
# @Author    :亮亮
# @说明       :
# @总结       :
import requests
from fake_useragent import UserAgent
from lxml import etree
import time
import random
import os
import multiprocessing
from urllib.request import Request, urlopen  # 构建get_html1函数
import ssl  # 构建get_html1函数


def get_html1(url, i):
    """
    Fetch one listing page of the album index, retrying on network errors.

    :param url: page URL to request
    :param i: page number, used only to build the Referer header
    :return: requests.Response for the page
    """

    # Crude rate limiting so we don't hammer the site.
    time.sleep(5)

    headers = {
        "User-Agent": UserAgent().random,
        "Referer": "https://www.xsnvshen.com/album/?p={}".format(i)
    }

    # Pick one proxy at random per request.
    # NOTE(review): these proxy values have no scheme or port — confirm they
    # are actually usable; requests may ignore or reject them as-is.
    proxies = [{"http": "107.167.195.2"}, {"http": "119.82.253.175"}, {"http": "187.191.4.88"}]
    proxies = random.choice(proxies)

    # Retry until the request succeeds. Catch only request-level errors so
    # KeyboardInterrupt/SystemExit still abort (the original bare `except`
    # swallowed those too), and add a timeout so a dead connection cannot
    # hang this loop forever.
    while True:
        try:
            response = requests.get(url, headers=headers, verify=False,
                                    proxies=proxies, timeout=30)
            break
        except requests.exceptions.RequestException:
            print("Connection refused by the server..")
            print("Let me sleep for 5 seconds")
            print("ZZzzzz...")
            time.sleep(5)

    return response


def get_html2(url):
    """
    Fetch a second-level album page (or an individual photo), retrying on
    network errors.

    :param url: URL to request
    :return: requests.Response for the page
    """

    # Crude rate limiting so we don't hammer the site.
    time.sleep(2)

    headers = {
        "User-Agent": UserAgent().random,
        "Referer": "https://www.xsnvshen.com/album",
        "Cookie": "__cfduid=d35cd31104ed8b55733c4c266b398b67d1594986258"
    }

    # Pick one proxy at random per request.
    # NOTE(review): these proxy values have no scheme or port — confirm they
    # are actually usable; requests may ignore or reject them as-is.
    proxies = [{"http": "107.167.195.2"}, {"http": "119.82.253.175"}, {"http": "187.191.4.88"}]
    proxies = random.choice(proxies)

    # Retry until the request succeeds. Catch only request-level errors so
    # KeyboardInterrupt/SystemExit still abort (the original bare `except`
    # swallowed those too), and add a timeout so a dead connection cannot
    # hang this loop forever.
    while True:
        try:
            response = requests.get(url, headers=headers, verify=False,
                                    proxies=proxies, timeout=30)
            break
        except requests.exceptions.RequestException:
            print("Connection refused by the server..")
            print("Let me sleep for 5 seconds")
            print("ZZzzzz...")
            time.sleep(5)

    return response


def re_html_1(info):
    """
    Parse the first-level listing page into titles, album URLs and counts.

    :param info: HTML text of the listing page
    :return: tuple of (album titles, absolute album URLs, per-album photo
             counts as strings) — the three lists are index-aligned.
    """
    # 1. Build the parse tree.
    e = etree.HTML(info)

    # 2. Extract the three parallel lists from the listing markup.
    title_names = e.xpath('//div[@class="camLiCon"]/div/p/a/text()')
    img_numbers = e.xpath('//li[@class="min-h-imgall_300"]/a/div/span[@class="num"]/text()')
    add_names = e.xpath('//li[@class="min-h-imgall_300"]/a/@href')

    # 3. The hrefs are site-relative; prefix the site root to make them
    #    absolute second-level album URLs.
    img_urls = ["https://www.xsnvshen.com/" + add_name for add_name in add_names]

    return title_names, img_urls, img_numbers


def re_html_2(info2, img_numbers):
    """
    Build the full list of photo URLs from a second-level album page.

    The page exposes one sample photo src whose name ends in a 3-digit
    sequence number (".../000.jpg"); all other photo URLs are generated by
    substituting the sequence number 000..N-1.

    :param info2: HTML text of the album page
    :param img_numbers: photo count for this album (numeric string)
    :return: list of absolute photo URLs
    :raises IndexError: when the page has no img#bigImg (caller catches this)
    :raises ValueError: when img_numbers is not numeric (caller catches this)
    """

    # 1. Build the parse tree.
    e = etree.HTML(info2)

    # 2. The sample src is protocol-relative (starts with "//"); [0] raises
    #    IndexError when the element is missing, which the caller handles.
    old_img_url = e.xpath('//img[@id="bigImg"]/@src')[0]

    # 3. Strip the trailing "NNN.jpg" (7 characters) and re-number.
    print(img_numbers)
    img_urls_list = ["https:" + old_img_url[:-7] + "{:0>3d}.jpg".format(seq)
                     for seq in range(int(img_numbers))]

    return img_urls_list


def save_picture(q, title_name, number):
    """
    Pool worker: pop one photo URL from the queue, download it, and save it
    as 测试文件夹/<title_name>/<number>.jpg.

    :param q: multiprocessing queue holding the photo URLs
    :param title_name: album title, used as the sub-directory name
    :param number: sequence number used for the file name
    """

    # makedirs with exist_ok replaces the original exists()/mkdir() pair,
    # which was racy across the 15 pool workers (two workers could both see
    # "missing" and one then crashed with FileExistsError), and it also
    # creates the parent 测试文件夹 if it does not exist yet.
    folder = os.path.join("测试文件夹", title_name)
    os.makedirs(folder, exist_ok=True)

    # Save the image; the URL comes from the shared queue.
    file_name = "{}{}".format(number, ".jpg")
    with open(os.path.join(folder, file_name), "wb") as f:
        response = get_html2(q.get())
        f.write(response.content)


def main2(url, i):
    """
    Download every album found on one listing page (capped at 24 albums).

    :param url: listing-page URL
    :param i: page number (forwarded to get_html1 for the Referer header)
    """
    info = get_html1(url, i).text

    # Parse titles, album URLs and photo counts from the listing page.
    title_names, img_urls, img_numbers = re_html_1(info)
    # todo 删除两个打印
    print(title_names)
    print(img_urls)
    print(img_numbers)

    # Walk the albums; enumerate replaces the hand-maintained counter and
    # also stops the inner loop variable from shadowing the parameter `i`
    # as the original code did.
    for name_number, img_url in enumerate(img_urls):

        # A listing page carries at most 24 albums.
        if name_number == 24:
            break

        response = get_html2(img_url)

        print("----二级标题：{}----".format(title_names[name_number]))
        info2 = response.text

        # Parse the photo URLs for this album. On failure skip just this
        # album (the original `break` aborted the whole page, contradicting
        # its own "跳出此次循环" comment, and leaked an already-created pool).
        try:
            img_urls_list = re_html_2(info2, img_numbers[name_number])
            print(img_urls_list)
        except Exception:
            print("该页面图片解析错误")
            continue

        # Create the pool/queue only after a successful parse so nothing
        # leaks on the error path above.
        po = multiprocessing.Pool(15)
        q = multiprocessing.Manager().Queue()
        for picture_url in img_urls_list:
            q.put(picture_url)

        # One task per photo; each worker pops one URL from the queue.
        for number in range(int(img_numbers[name_number])):
            po.apply_async(save_picture, args=(q, title_names[name_number], number))

        po.close()
        po.join()

    return


def main():
    """
    Entry point: ask for a page count, build the listing-page URLs, and
    hand each page to main2 for downloading.
    """
    # Number of pages to crawl; +1 so the range below covers the last page.
    papers = int(input("输入要爬的页数：")) + 1

    # Listing-page URL template.
    base_url = "https://www.xsnvshen.com/album/?p={}"

    # All listing-page URLs.
    # NOTE(review): the range starts at 16, which looks like a leftover
    # resume point — entering fewer than 16 pages yields an empty list;
    # confirm whether this is intentional.
    list_url = [base_url.format(page) for page in range(16, papers)]

    print(list_url)
    # Crawl each page in turn, reporting 1-based progress.
    for page_count, url in enumerate(list_url, start=1):
        print("----正在下载第{}页----".format(page_count))
        main2(url, page_count)

    print("全部完成")


if __name__ == '__main__':
    main()
