
# https://www.163.com/dy/article/JBLN6JTD053410UP.html

import os
import urllib.request

from lxml import etree

def create_request(url):
    """Build a urllib Request for *url* with browser-like headers.

    Args:
        url: The page URL to fetch.

    Returns:
        A urllib.request.Request carrying a Cookie and User-Agent header
        so the server responds as it would to a regular browser.
    """
    headers = {
        'Cookie': 'BIDUPSID=E04D67B5F25C419FF5CAC9517D0526B4; PSTM=1733134135; BAIDUID=E04D67B5F25C419F6775388B1B95BE63:FG=1; BD_UPN=12314753; H_WISE_SIDS_BFESS=60278_61027_60853_61610_61693_61780_61823_61844_61987; BAIDUID_BFESS=E04D67B5F25C419F6775388B1B95BE63:FG=1; BA_HECTOR=a181800g042k81210h2k05ag2nbdre1jp1jqb1v; Hm_lvt_aec699bb6442ba076c8981c6dc490771=1736987101,1737193278,1737460267,1737543515; ZFY=aur9HrpWYrn7Jw8b8ogzBa:AcXNMsyOna:AgftxeHR2dA:C; B64_BOT=1; baikeVisitId=b013de5f-a221-42f3-8f22-8ea63adf36d5; COOKIE_SESSION=498_0_9_8_10_7_1_0_8_5_38_0_489_0_4_0_1737548927_0_1737548923%7C9%2377433_15_1736991585%7C5; H_PS_PSSID=60278_61027_60853_61693_61780_61823_61844_61987; H_WISE_SIDS=60278_61027_60853_61693_61780_61823_61844_61987; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; BD_HOME=1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
    }
    # Fixed misspelled local variable ("requerst" -> "request").
    request = urllib.request.Request(url=url, headers=headers)
    return request

def get_content(request):
    """Open *request* through a proxy-aware opener and return the page body.

    Args:
        request: A prepared urllib.request.Request.

    Returns:
        The response body decoded as UTF-8 text.
    """
    # Direct (non-proxied) alternative, kept for reference:
    # response = urllib.request.urlopen(request)

    # NOTE(review): the proxy mapping only covers the 'http' scheme, so
    # https URLs (like the target article) bypass it — confirm intended.
    proxies = {
        'http': '101.200.75.55:80'  # Beijing
    }
    handler = urllib.request.ProxyHandler(proxies=proxies)
    opener = urllib.request.build_opener(handler)
    # Use a context manager so the response socket is always closed
    # (the original leaked it).
    with opener.open(request) as response:
        content = response.read().decode('utf8')
    return content

def download(content):
    """Extract image URLs from *content* and save each image to disk.

    Args:
        content: HTML text of the article page.

    Side effects:
        Writes ./download/wallpaper/phone<N>.jpg for each matched image
        and prints a progress message per file.
    """
    # Parse the HTML and collect every image src inside <p class="f_center">.
    tree = etree.HTML(content)
    src_list = tree.xpath("//p[@class='f_center']/img/@src")
    # Ensure the target directory exists; the original crashed with
    # FileNotFoundError when it was missing.
    os.makedirs('./download/wallpaper', exist_ok=True)
    for i, src in enumerate(src_list, start=1):
        filename = './download/wallpaper/phone' + str(i) + '.jpg'
        urllib.request.urlretrieve(src, filename)
        print("下载成功" + str(i) + "个")

if __name__ == '__main__':
    # Target article hosting the wallpaper images.
    url = 'https://www.163.com/dy/article/JBLN6JTD053410UP.html'
    # Build the browser-masquerading request, fetch the page HTML
    # through the proxy opener, then download every image it lists.
    page_html = get_content(create_request(url))
    download(page_html)















