# -*- coding: utf-8 -*-
# @Time    : 2025/3/27 下午3:18
# @Author  : yqk
# @File    : 爬取美女壁纸.py
# @Software: PyCharm
import requests
from lxml import etree
import os
# Cookies captured from a browser session on mm.enterdesk.com.
# NOTE(review): these are hard-coded snapshot values (timestamps/tokens)
# and will expire — refresh them from a live browser session when the
# scraper stops working.
cookies = {
    'Hm_lvt_86200d30c9967d7eda64933a74748bac': '1743060049',
    'HMACCOUNT': 'E85503690916CA6F',
    't': '6e261f00966f376ed8eb80e4786afd00',
    'r': '3822',
    'Hm_lpvt_86200d30c9967d7eda64933a74748bac': '1743063905',
}

# Browser-like request headers (Chrome 134 on Windows UA, same-origin
# referer, etc.) so the site serves the normal HTML page instead of an
# anti-bot response.
headers = {
    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
    'accept-language': 'zh-CN,zh;q=0.9',
    'cache-control': 'max-age=0',
    'priority': 'u=0, i',
    'referer': 'https://mm.enterdesk.com/',
    'sec-ch-ua': '"Chromium";v="134", "Not:A-Brand";v="24", "Google Chrome";v="134"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"Windows"',
    'sec-fetch-dest': 'document',
    'sec-fetch-mode': 'navigate',
    'sec-fetch-site': 'same-origin',
    'sec-fetch-user': '?1',
    'upgrade-insecure-requests': '1',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
    # 'cookie': 'Hm_lvt_86200d30c9967d7eda64933a74748bac=1743060049; HMACCOUNT=E85503690916CA6F; t=6e261f00966f376ed8eb80e4786afd00; r=3822; Hm_lpvt_86200d30c9967d7eda64933a74748bac=1743063905',
}
# Sample detail-page URL of a single wallpaper post.
url = 'https://mm.enterdesk.com/bizhi/64629-351483.html'
def get_img(url, headers, cookies):
    """Fetch one wallpaper detail page and extract image URLs and titles.

    Args:
        url: Detail-page URL (e.g. ``https://mm.enterdesk.com/bizhi/...``).
        headers: HTTP headers to send (browser-like UA etc.).
        cookies: Session cookies for the site.

    Returns:
        A tuple ``(img_urls, titles)`` — two parallel lists pulled from the
        page's ``swiper-slide`` carousel: the ``src`` values of the images
        and their ``title`` attributes.

    Raises:
        requests.HTTPError: if the server answers with a 4xx/5xx status.
        requests.Timeout: if the request exceeds the 10 s timeout.
    """
    # timeout so a stalled connection cannot hang the crawler forever
    response = requests.get(url, cookies=cookies, headers=headers, timeout=10)
    # Fail loudly on HTTP errors instead of silently parsing an error page
    # and returning empty lists.
    response.raise_for_status()
    html = etree.HTML(response.text)
    img_urls = html.xpath('//div[@class="swiper-slide"]//img/@src')
    titles = html.xpath('//div[@class="swiper-slide"]//img/@title')
    return img_urls, titles

# NOTE(review): a module-level `img_url, title = get_img(url, headers, cookies)`
# used to run here, performing a live HTTP request at import time. Nothing
# ever read those module-level names (download_img re-fetches each page
# itself), so the dead request has been removed.

def download_img(img_url):
    """Download every carousel image from one wallpaper detail page.

    Args:
        img_url: URL of the *detail page* to scrape (the name is
            historical — it is a page URL, not an image URL); it is passed
            straight to ``get_img``.

    Side effects:
        Creates ``图片/<title>/`` (parent included) and writes one
        ``<num>.jpg`` file per image found on the page.
    """
    img_urls, titles = get_img(img_url, headers, cookies)
    # xpath() returns lists; the original code interpolated the whole list
    # into the path, producing folders named like "['标题']". Use the first
    # title, with a fallback when the page has none.
    folder_name = titles[0] if titles else 'untitled'
    folder = f'图片/{folder_name}'
    # makedirs also creates the parent 图片/ directory; os.mkdir would raise
    # FileNotFoundError when it does not exist yet.
    os.makedirs(folder, exist_ok=True)
    for num, pic_url in enumerate(img_urls):
        response = requests.get(pic_url, cookies=cookies, headers=headers, timeout=10)
        response.raise_for_status()
        with open(f'{folder}/{num}.jpg', 'wb') as f:
            f.write(response.content)
        print(f'{folder}/{folder_name}{num}下载完成')

# Crawl the listing (index) pages for links to individual wallpaper posts.
# NOTE(review): the driver loop at the bottom builds its own per-page URLs,
# so this site-root constant is currently unused by it.
index_url = 'https://mm.enterdesk.com/'
def get_index_page(index_url, headers, cookies):
    """Fetch one listing page and return the detail-page links on it.

    Args:
        index_url: URL of a listing page (e.g. ``https://mm.enterdesk.com/2.html``).
        headers: HTTP headers to send.
        cookies: Session cookies for the site.

    Returns:
        List of ``href`` strings from the ``egeli_pic_dl`` thumbnail grid.

    Raises:
        requests.HTTPError: if the server answers with a 4xx/5xx status.
        requests.Timeout: if the request exceeds the 10 s timeout.
    """
    r = requests.get(index_url, headers=headers, cookies=cookies, timeout=10)
    # Fail loudly on HTTP errors instead of scraping an error page.
    r.raise_for_status()
    html = etree.HTML(r.text)
    return html.xpath('//dl[@class="egeli_pic_dl"]//a/@href')


# Crawl listing pages 1-9 and download the images from every linked post.
if __name__ == '__main__':
    for page in range(1, 10):
        # fixed typo: was `nex_links`; this is a single page URL, not links
        page_url = f'https://mm.enterdesk.com/{page}.html'
        links = get_index_page(page_url, headers, cookies)
        for link in links:
            try:
                download_img(link)
            except Exception as e:
                # One bad post should not abort the whole crawl;
                # report it and move on to the next link.
                print(f'下载 {link} 失败: {e}')
