# https://www.51miz.com/so-sucai/1637469.html
# https://www.51miz.com/so-sucai/1637469/p_2/

# //a[@class='image-box']/img/@alt
# //a[@class='image-box']/img/@src

import urllib.request
from lxml import etree

def getPage(page):
    """Return the 51miz search-result URL for the given 1-based page number.

    Page 1 uses the plain ``.html`` URL; subsequent pages use the
    ``/p_<n>/`` path form (see the sample URLs at the top of the file).
    """
    if page == 1:
        return 'https://www.51miz.com/so-sucai/1637469.html'
    return f'https://www.51miz.com/so-sucai/1637469/p_{page}/'

# Build the customized request object
def create_request(url):
    """Wrap *url* in a ``urllib.request.Request`` carrying browser-like headers.

    The Cookie and User-Agent headers mimic a real Chrome session so the
    server does not reject the automated request.
    """
    browser_headers = {
        'Cookie': 'BIDUPSID=E04D67B5F25C419FF5CAC9517D0526B4; PSTM=1733134135; BAIDUID=E04D67B5F25C419F6775388B1B95BE63:FG=1; BD_UPN=12314753; H_WISE_SIDS_BFESS=60278_61027_60853_61610_61693_61780_61823_61844_61987; BAIDUID_BFESS=E04D67B5F25C419F6775388B1B95BE63:FG=1; BA_HECTOR=a181800g042k81210h2k05ag2nbdre1jp1jqb1v; Hm_lvt_aec699bb6442ba076c8981c6dc490771=1736987101,1737193278,1737460267,1737543515; ZFY=aur9HrpWYrn7Jw8b8ogzBa:AcXNMsyOna:AgftxeHR2dA:C; B64_BOT=1; baikeVisitId=b013de5f-a221-42f3-8f22-8ea63adf36d5; COOKIE_SESSION=498_0_9_8_10_7_1_0_8_5_38_0_489_0_4_0_1737548927_0_1737548923%7C9%2377433_15_1736991585%7C5; H_PS_PSSID=60278_61027_60853_61693_61780_61823_61844_61987; H_WISE_SIDS=60278_61027_60853_61693_61780_61823_61844_61987; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; BD_HOME=1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
    }
    return urllib.request.Request(url=url, headers=browser_headers)

def get_content(request):
    """Fetch *request* like a browser and return the UTF-8 decoded page body.

    Uses a ``with`` block so the HTTP response is closed deterministically
    instead of leaking until garbage collection.
    """
    # Simulate a browser visiting the server
    with urllib.request.urlopen(request) as response:
        return response.read().decode('utf-8')

def download(content):
    """Extract image names and URLs from the page HTML and print them.

    Parses ``content`` with lxml and reads the ``alt`` text and the
    ``data-layzeload`` (lazy-load) source of each result image. The actual
    file download is left commented out.
    """
    tree = etree.HTML(content)
    names = tree.xpath("//a[@class='image-box']/img/@alt")
    srcs = tree.xpath("//a[@class='image-box']/img/@data-layzeload")
    # zip() pairs the two lists safely: if the page yields unequal counts,
    # we stop at the shorter one instead of raising IndexError.
    for index, (alt, path) in enumerate(zip(names, srcs)):
        src = "https:" + path  # data-layzeload values are protocol-relative
        print(src)
        # Image download target path (directory must exist before enabling
        # urlretrieve below)
        filename = './download/wallpaper/' + alt + str(index) + '.png'
        print(filename)
        # urllib.request.urlretrieve(src, filename)
        # print("download succeeded")


if __name__ == '__main__':
    # Inclusive page range to scrape.
    first_page = 1
    last_page = 1

    for page in range(first_page, last_page + 1):
        page_url = getPage(page)
        # Build the request with browser-like headers
        page_request = create_request(page_url)
        # Fetch the page HTML like a browser would
        html = get_content(page_request)
        # Extract and report the image names/URLs
        download(html)


