#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName  :1.0.3 魅力女人网图片爬虫.py
# @Time      :2020/7/16 22:33
# @Author    :亮亮
# @说明       :
# @总结       :xpath属性里的值不用加text
"""
1.需要增加延时重下功能
3.显示优化
4.重复下载判断
5.特殊空照片格式不导入
6.代理池反爬
7.延时反爬
8.云端部署
"""
import requests
from fake_useragent import UserAgent
from lxml import etree
import os
import multiprocessing


def get_html(url):
    """
    Fetch a web page.

    :param url: URL to request
    :return: ``requests.Response`` on success, the int ``404`` as an error
             sentinel on any failure (callers compare against 404)
    """
    try:
        # 1. Build request headers — random User-Agent as a basic anti-bot measure
        headers = {
            "User-Agent": UserAgent().random
        }

        # 2. Perform the request; a timeout keeps a dead connection from
        #    hanging the whole crawler (original had none)
        response = requests.get(url, headers=headers, timeout=10)

        return response

    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed. Both network errors and a failing
        # UserAgent lookup map to the 404 sentinel, as before.
        return 404


def re_html_1(info):
    """
    Parse the listing (first-level) page into gallery titles and detail links.

    :param info: raw HTML of the listing page
    :return: tuple ``(titles, links)`` — two parallel lists
    """
    document = etree.HTML(info)

    titles = document.xpath('//div[@class="fv_list_box"]/dl/dd/span/a/text()')
    links = document.xpath('//div[@class="fv_list_box"]/dl/dd/span//@href')

    return titles, links


def re_html_2(info2):
    """
    Extract every image URL from a gallery (second-level) page.

    :param info2: raw HTML of the gallery page
    :return: list of image ``src`` URLs
    """
    document = etree.HTML(info2)
    return document.xpath('//div[@class="fv_art_body"]/img/@src')


def save_picture(picture_url, title_name, number):
    """
    Download one image and save it under 测试文件夹/<title_name>/<number>.jpg.

    Runs inside the process pool, so directory creation must be race-safe.

    :param picture_url: direct URL of the image
    :param title_name: gallery title, used as the sub-directory name
    :param number: sequence number within the gallery, used as the file name
    """
    folder = "测试文件夹/{}".format(title_name)
    # makedirs + exist_ok: creates the parent "测试文件夹" if missing and is
    # safe when several pool workers hit the same gallery concurrently
    # (os.mkdir raised if the parent was absent or the dir already existed).
    os.makedirs(folder, exist_ok=True)

    # Download BEFORE opening the file, so a failed request no longer
    # leaves an empty .jpg behind.
    response = get_html(picture_url)
    if response == 404:
        return

    file_name = "{}{}".format(number, ".jpg")
    with open(folder + "/" + file_name, "wb") as f:
        f.write(response.content)


def main2(url):
    """
    Crawl one listing page: parse its galleries and download every image
    through a 5-worker process pool.

    :param url: URL of the listing page
    """
    try:
        # get_html returns the int 404 on failure; .text then raises
        # AttributeError, which we treat as "skip this page".
        info = get_html(url).text
    except AttributeError:
        return

    # Parse titles and gallery links from the listing page.
    title_names, img_urls = re_html_1(info)

    # Process pool for the actual image downloads.
    # (The original also created a Manager().Queue() that was filled but
    # never read — dropped, it only cost an extra manager process.)
    po = multiprocessing.Pool(5)

    # zip keeps each title paired with its own URL even when a fetch fails;
    # the old manual name_number counter skipped incrementing on failure and
    # misaligned every following title.
    for title_name, img_url in zip(title_names, img_urls):

        response = get_html(img_url)
        if response == 404:
            # Fetch failed — skip this gallery.
            continue

        print("----二级标题：{}----".format(title_name))

        # Extract the image URLs of this gallery and hand each to the pool.
        img_urls_list = re_html_2(response.text)
        for number, picture_url in enumerate(img_urls_list):
            po.apply_async(save_picture, args=(picture_url, title_name, number))

    # Close the pool and wait for all downloads to finish.
    po.close()
    po.join()


def main():
    """
    Entry point: ask how many listing pages to crawl, build their URLs and
    hand each one to main2.
    """
    # 1. Number of listing pages to crawl (1-based, inclusive).
    papers = int(input("输入要爬的页数：")) + 1

    # 2. Listing-page URL template.
    old_url = "http://www.jr2.cn/zfyh/list_{}.html"

    # 3. Build all listing-page URLs.
    list_url = [old_url.format(i) for i in range(1, papers)]

    # 4. Crawl each page. The old pre-check `get_html(url) == 404` downloaded
    #    every page twice (once here, once in main2); main2 already skips
    #    pages that fail to fetch, so the probe is gone.
    for i, url in enumerate(list_url, start=1):
        print("----正在下载第{}页----".format(i))
        main2(url)

    print("全部完成")


if __name__ == '__main__':
    main()
