# -*- coding: utf-8 -*-
# @Time        :2025/3/6 17:08
# @Author      :文刀水寿
# @File        : 20_爬虫_解析_站长素材.py
"""
 @Description :
"""
import os
import urllib.request

from lxml import etree


# https://m.sc.chinaz.com/tupian/rentiyishu.html
# https://sc.chinaz.com/tupian/rentiyishu_2.html
# https://sc.chinaz.com/tupian/rentiyishu_3.html


def create_request(page):
    """Build a urllib Request for one listing page.

    Page 1 of the gallery has no numeric suffix in its URL; every later
    page appends ``_<page>`` before the extension.

    :param page: 1-based page number of the image listing.
    :return: a ``urllib.request.Request`` carrying browser-like headers
             (cookie + mobile User-Agent) so the site serves the page.
    """
    headers = {
        'cookie': 'cz_statistics_visitor=94f38549-eaea-2f32-8c49-016b094bfc9c; _clck=1hgy45u%7C2%7Cftz%7C0%7C1891; Hm_lvt_dc79411433d5171fc5e72914df433002=1741252183; _clsk=1mrgv16%7C1741271806382%7C2%7C1%7Cp.clarity.ms%2Fcollect; Hm_lvt_398913ed58c9e7dfe9695953fb7b6799=1741252138,1741308872,1741308879; HMACCOUNT=71C917125BE5E21C; Hm_lpvt_398913ed58c9e7dfe9695953fb7b6799=1741311859',
        'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Mobile Safari/537.36 Edg/133.0.0.0',
    }
    suffix = '' if page == 1 else '_' + str(page)
    url = f'https://sc.chinaz.com/tupian/rentiyishu{suffix}.html'
    return urllib.request.Request(url=url, headers=headers)


def get_content(requestt):
    """Fetch the page for *requestt* and return its body decoded as UTF-8.

    :param requestt: a ``urllib.request.Request`` (or URL string) to open.
    :return: the response body as a ``str``.

    The original version never closed the HTTP response; the ``with``
    block releases the connection deterministically.
    """
    with urllib.request.urlopen(requestt) as response:
        return response.read().decode("utf8")


def down_load(content):
    """Parse a listing page and download every image it references.

    :param content: HTML source of one listing page (``str``).

    Side effects: writes ``<alt-text>.jpg`` files into a hard-coded local
    directory via ``urllib.request.urlretrieve``.
    """
    tree = etree.HTML(content)
    # Image display names come from the alt text; the real image URLs are
    # lazy-loaded, so they live in data-original rather than src.
    name_list = tree.xpath("//div[@class='container']//img/@alt")
    src_list = tree.xpath("//div[@class='container']//img/@data-original")

    # Raw string avoids fragile backslash escapes; os.path.join avoids the
    # original's mixed "\"/"/" separators in the target path.
    save_dir = r'D:\PythonCharm\PyCharmProject\pythonProject\love_img'
    # zip (instead of indexing by range(len(name_list))) cannot raise
    # IndexError when the alt and data-original counts differ.
    for name, src in zip(name_list, src_list):
        url = 'https:' + src
        urllib.request.urlretrieve(url, filename=os.path.join(save_dir, name + '.jpg'))


if __name__ == '__main__':
    # Ask the user for the inclusive page range to scrape.
    start_page = int(input("请输入起始页码"))
    end_page = int(input("请输入结束页码"))

    for page in range(start_page, end_page + 1):
        # Build the request, fetch the page source, then download its images.
        down_load(get_content(create_request(page)))
