# Author:Yiye
# -*- coding: utf-8 -*-
# @Time :2021/11/19 23:44
# @Author:李卓
# @Site : 
# @File : 站长素材.py
# @Software : PyCharm


# (1) 请求对象的定制
# （2）获取网页的源码
# （3）下载


# 需求 下载的前十页的图片
# https://sc.chinaz.com/tupian/qinglvtupian.html   1
# https://sc.chinaz.com/tupian/qinglvtupian_page.html

import urllib.request
from lxml import etree

def create_request(page):
    """Build a urllib Request for the given listing page.

    Page 1 of the gallery has no numeric suffix in its URL; every later
    page appends "_<page>" before the extension.
    """
    base = 'https://sc.chinaz.com/tupian/xixirenti'
    url = f'{base}.html' if page == 1 else f'{base}_{page}.html'

    # Session cookie copied from a browser so the site serves the page.
    headers = {
        "Cookie":"___rl__test__cookies=1637339572711; qHistory=aHR0cDovL3Rvb2wuY2hpbmF6LmNvbV/nq5nplb/lt6Xlhbd8aHR0cDovL3BpbmcuY2hpbmF6LmNvbV9QaW5n5qOA5rWL; UM_distinctid=17d38dcef73709-0bf96f8493ef5d-561a1154-144000-17d38dcef74960; Hm_lvt_398913ed58c9e7dfe9695953fb7b6799=1637336554,1637336576; OUTFOX_SEARCH_USER_ID_NCOO=1340951664.3775601; CNZZDATA300636=cnzz_eid=1071343354-1637330612-https%3A%2F%2Fwww.baidu.com%2F&ntime=1637341413; ASP.NET_SessionId=wvawzgugawh2r1izgnwtpevl; Hm_lpvt_398913ed58c9e7dfe9695953fb7b6799=1637344131"
    }

    return urllib.request.Request(url=url, headers=headers)

def get_content(request):
    """Fetch the page for *request* and return its body decoded as UTF-8.

    Uses a context manager so the HTTP response is always closed, even if
    reading or decoding raises (the original leaked the connection).
    """
    with urllib.request.urlopen(request) as response:
        return response.read().decode('utf-8')


def down_load(content):
    """Parse a listing page's HTML and download every image into ./a/.

    The site lazy-loads its images, so the real image URL is kept in the
    img tag's custom ``src2`` attribute rather than ``src``.
    """
    import os

    tree = etree.HTML(content)

    name_list = tree.xpath('//div[@id="container"]//a/img/@alt')
    # Lazy loading: the genuine URL lives in @src2, @src is a placeholder.
    src_list = tree.xpath('//div[@id="container"]//a/img/@src2')

    # Fix: urlretrieve raises FileNotFoundError if ./a/ does not exist yet.
    os.makedirs('./a', exist_ok=True)

    for name, src in zip(name_list, src_list):
        # The page uses protocol-relative URLs ("//..."); prepend a scheme.
        url = 'https:' + src
        urllib.request.urlretrieve(url=url, filename='./a/' + name + '.jpg')




if __name__ == '__main__':
    # Ask the user for the inclusive page range to scrape.
    start_page = int(input('请输入起始页码'))
    end_page = int(input('请输入结束页码'))

    for page in range(start_page, end_page + 1):
        # (1) build the request, (2) fetch the page HTML, (3) download images
        request = create_request(page)
        content = get_content(request)
        down_load(content)


        # TODO: list all MinIO buckets, return their names and links,
        # persist them to a database, and store them locally.