import re
import urllib.parse
import urllib.request

from lxml import etree

# Parse a local file: etree.parse()
# Parse a fetched web page (HTML string): etree.HTML()


# URL pattern: page 1 is index.html, page n (n >= 2) is index_<n>.html, e.g.:
#   https://sc.chinaz.com/tupian/index_2.html
#   https://sc.chinaz.com/tupian/index.html
# 1. Fetch the listing pages (intended: first ten; the range below controls how many)
# The headers never change between pages, so build them once instead of
# recreating the dict on every iteration.
HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36"}
# NOTE(review): the comment above says "first ten pages" but the range only
# covers page 1 — widen to range(1, 11) to actually crawl ten pages.
for i in range(1, 2):
    # Page 1 has no numeric suffix; later pages use index_<n>.html.
    # i is a plain integer, so no URL-quoting is needed.
    if i == 1:
        url = 'https://sc.chinaz.com/tupian/index.html'
    else:
        url = f'https://sc.chinaz.com/tupian/index_{i}.html'
    my_request = urllib.request.Request(url=url, headers=HEADERS)
    # Close the HTTP response deterministically instead of leaking it to GC.
    with urllib.request.urlopen(my_request) as response:
        content = response.read().decode("utf-8")
    # 2. Extract the image URLs.
    # The site lazy-loads images (images load only when scrolled into view),
    # so the real URL is in the data-original attribute, not src — scrape the
    # markup as the server initially returns it.
    tree = etree.HTML(content)
    # Read alt and data-original from the SAME <img> nodes. The original code
    # zipped two independent xpath queries (items vs. the whole container),
    # which silently misaligns names and URLs when the node counts differ.
    img_list2 = []
    for img in tree.xpath('//div[@class="item"]/img'):
        src = img.get('data-original')
        if not src:
            continue  # no lazy-load URL on this node; nothing to download
        alt = img.get('alt', '')
        img_list2.append([alt, "https:" + src])
    print(img_list2)
    print(f"**************************************第{i}次**************************************************")
    # 3. Download the images into the current directory.
    for alt, src in img_list2:
        # alt text comes from the page and may contain characters that are
        # illegal in filenames (\/:*?"<>|) — replace them so urlretrieve
        # does not fail when creating the file.
        safe_name = re.sub(r'[\\/:*?"<>|]', '_', alt)
        urllib.request.urlretrieve(url=src, filename=safe_name + '.jpg')
