import os
import urllib.request

from lxml import etree


# （1） 请求对象的定制
# （2） 获取网页的源码
# （3） 下载

def create_request(page):
    """Build a urllib Request for one page of the image listing.

    Page 1 uses the bare listing URL; later pages append "_<page>" before
    ".html". A browser-like User-Agent header is attached so the site does
    not reject the crawler.

    :param page: 1-based page number of the listing.
    :return: urllib.request.Request ready to be opened.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36'
    }
    if page == 1:
        url = 'https://sc.chinaz.com/tupian/qinglvtupian.html'
    else:
        url = 'https://sc.chinaz.com/tupian/qinglvtupian_' + str(page) + '.html'

    # Bug fix: the original returned an undefined name `request`
    # (NameError at call time); construct the Request object here.
    request = urllib.request.Request(url=url, headers=headers)
    return request


# Improvement: route the request through an HTTP proxy.
def get_content(request):
    """Open the request through a proxy and return the decoded page source.

    NOTE(review): the proxy address is hard-coded and may well be dead by
    now; if opening fails, fall back to urllib.request.urlopen(request).

    :param request: a urllib.request.Request (see create_request).
    :return: the response body decoded as UTF-8 (str).
    """
    proxies = {
        # Bug fix: the original value contained a literal TAB between the
        # colon and the port ('...139:\t6969'), an invalid proxy address.
        'http': '120.194.55.139:6969'
    }
    handler = urllib.request.ProxyHandler(proxies=proxies)
    opener = urllib.request.build_opener(handler)
    response = opener.open(request)

    # Direct (proxy-less) variant kept for reference:
    # response = urllib.request.urlopen(request)
    content = response.read().decode('utf-8')
    return content


def down_load(content):
    """Parse one listing page and download every image it references.

    Images are saved as ./test_file/<alt-text>.jpg; the directory is
    created on demand.

    :param content: HTML source of one listing page (str).
    """
    tree = etree.HTML(content)
    # The site's markup has changed over time — the commented xpath below
    # targeted the older "masonry" layout and is kept for reference.
    # name_list = tree.xpath('//div[@class="container"]//div[@class="tupian-list com-img-txt-list masonry"]//div[@class="item masonry-brick"]//img//@alt')
    name_list = tree.xpath('//div[@class="container"]//div[@class="tupian-list com-img-txt-list"]//div[@class="item"]//img//@alt')

    # Image sites typically lazy-load: the real URL lives in @data-original
    # rather than @src, so scrape that attribute.
    link_list = tree.xpath('//div[@class="container"]//div[@class="tupian-list com-img-txt-list"]//div[@class="item"]//img//@data-original')

    # Robustness: make sure the target directory exists before writing;
    # the original crashed if ./test_file/ was missing.
    os.makedirs('./test_file', exist_ok=True)

    # zip() instead of index-based iteration: idiomatic, and it also guards
    # against the two xpath lists having different lengths (the original
    # would raise IndexError if link_list were shorter than name_list).
    for name, link in zip(name_list, link_list):
        url = 'https:' + link  # @data-original is protocol-relative
        print(name, url)
        urllib.request.urlretrieve(url=url, filename='./test_file/' + name + '.jpg')



# Requirement: download the images from a user-chosen range of pages.
if __name__ == '__main__':
    start_page = int(input("请输入起始页码"))
    end_page = int(input("请输入结束页码"))

    # For every page in the inclusive range: build the request object,
    # fetch the page source, then download the images it lists.
    for page in range(start_page, end_page + 1):
        down_load(get_content(create_request(page)))