#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author：TanDabao
# CreateTime：2021/10/20 17:01

'''
爬取彼岸图网分类中一种前5页图片
'''

import requests
import os
from lxml import etree

# Directory where downloaded images are saved.
dirName = 'ImgLibs'
# makedirs(..., exist_ok=True) replaces the exists()+mkdir pair: it is
# race-free (no TOCTOU window between the check and the create) and is a
# no-op when the directory is already there.
os.makedirs(dirName, exist_ok=True)

# Site root URL; detail-page hrefs and image srcs are relative, so this
# prefix is prepended to build absolute URLs.
domain_url = 'https://pic.netbian.com'

# Listing-page URL template; %d is filled with the page number
# (page 1 uses a different URL without the index_N suffix — see the loop).
url = 'https://pic.netbian.com/4kmeinv/index_%d.html'
# Loop-invariant request setup, hoisted out of the page loop:
# explicitly disable any system proxy, and send a browser User-Agent
# so the site does not reject the request.
proxies = {"http": None, "https": None}
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36'
}
# One parser instance is enough; encoding="gbk" keeps lxml tolerant of
# this site's malformed, GBK-encoded HTML.
parser = etree.HTMLParser(encoding="gbk")

for page in range(1, 6):    # crawl the first 5 pages
    # Page 1 of this site has no index_N.html suffix.
    if page == 1:
        new_url = 'https://pic.netbian.com/4kmeinv/'
    else:
        new_url = url % page    # %-format directly; the old format() wrapper was a no-op

    # timeout prevents a stalled server from hanging the crawl forever.
    response = requests.get(url=new_url, proxies=proxies, headers=headers, timeout=10)
    # The site serves GBK; decoding as utf-8 garbles the Chinese titles.
    response.encoding = 'gbk'
    tree = etree.HTML(response.text, parser=parser)

    for item in tree.xpath('//div[@class="slist"]/ul/li'):
        # alt text of the thumbnail is used as the file name (may be absent).
        titles = item.xpath('./a/img/@alt')
        for href in item.xpath('./a/@href'):
            detail_url = domain_url + href
            try:
                # Fetch the detail page to find the full-resolution image URL.
                res = requests.get(url=detail_url, proxies=proxies, headers=headers, timeout=10)
                res.encoding = 'gbk'
                detail_tree = etree.HTML(res.text, parser=parser)
                srcs = detail_tree.xpath('//div[@class="photo-pic"]/a[@id="img"]/img/@src')
                if not srcs:
                    # Layout changed or image missing — skip instead of IndexError.
                    continue
                img_url = domain_url + srcs[0]

                img_data = requests.get(url=img_url, proxies=proxies, headers=headers, timeout=10).content
                # Fall back to the image's file name when the alt text is missing,
                # so an empty titles list no longer crashes the whole run.
                name = titles[0] if titles else os.path.basename(img_url).rsplit('.', 1)[0]
                img_path = os.path.join(dirName, name + '.jpg')
                with open(img_path, 'wb') as f:
                    f.write(img_data)
                print(name, '\t\t下载成功')
            except requests.RequestException as exc:
                # One bad page/image should not abort the remaining downloads.
                print(detail_url, 'download failed:', exc)
