import os

import requests
from bs4 import BeautifulSoup

# Listing page of the image-album site to scrape.
url = "https://umei.net"
# timeout keeps the script from hanging forever on a stalled connection
# (requests.get has NO default timeout).
response = requests.get(url, timeout=10)
response.encoding = 'utf-8'

# Parse the listing page and take only the first two album entries.
page = BeautifulSoup(response.text, "html.parser")
ul = page.find("ul", attrs={"class": "update_area_lists cl"})
li_list = ul.find_all("li")[0:2]
for li in li_list:
    # Each <li> holds one album: its title and the href of the first detail page.
    a = li.find("a")
    title = a.get("title")
    href = a.get("href")

    # Create a local folder named after the album title.
    os.makedirs(title, exist_ok=True)
    num = 1
    while True:
        # Fetch the image detail page (href is relative to the site root).
        detail_url = url + href
        print(f"请求url:{detail_url}")
        # timeout prevents an indefinite hang on a stalled connection.
        child_response = requests.get(detail_url, timeout=10)
        child_response.encoding = "utf-8"
        child_page = BeautifulSoup(child_response.text, "html.parser")

        # Locate the full-size image on the detail page.
        image_div = child_page.find("div", attrs={"class": "image_div"})
        p = image_div.find("p")
        a_p = p.find("a")
        img = a_p.find("img")
        if img is None:
            # No image on this page — nothing more to download for this album.
            break
        img_src = img.get("src")

        # Download the image bytes and write them to <title>/<num>.jpg.
        img_respo = requests.get(img_src, timeout=10)
        with open(f'{title}/{num}.jpg', "wb") as o_f:
            o_f.write(img_respo.content)
        num += 1

        # Check the pagination bar for a "next page" ('下一页') link.
        img_page = child_page.find("div", attrs={"class": "nav-links page_imges"})
        a_page_list = img_page.find_all("a")
        page_info = []
        # `a_nav` avoids shadowing the outer album anchor `a`.
        for a_nav in a_page_list:
            page_info.append(a_nav.text)
            if '下一页' in a_nav.text:
                # BUG FIX: the original read the image anchor's href (a_p)
                # here instead of the pagination anchor's, so the script never
                # advanced to the real next page. Take the href of the
                # '下一页' link itself.
                href = a_nav.get("href")

        print(num, page_info)
        if '下一页' not in page_info:
            # Last page of this album reached.
            break
