"""
# coding     : utf-8 
# Time       : 2025/6/9 16:13
# Author     : chenxianb
# version    : python 3.8.2
# Description: 获取彼岸图网上的图片
"""
import os.path
import time

import requests
from bs4 import BeautifulSoup
import re


def get_html_text(url):
    """Fetch *url* and return the decoded page text, or "" on any request failure.

    Sends a desktop-Firefox User-Agent plus the site cookie so the server
    treats us like a normal browser; decodes using the apparent (sniffed)
    encoding because the site serves GBK content.
    """
    header = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:139.0) Gecko/20100101 Firefox/139.0",
              "Host": "pic.netbian.com",
              "Cookie": "zkhanecookieclassrecord=%2C54%2C"}
    try:
        r = requests.get(url, timeout=30, headers=header)
        r.raise_for_status()  # turn 4xx/5xx into an exception
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.
        # Catch only request-level failures; callers treat "" as "no page".
        return ""


def get_url(html):
    """Parse a listing page and return [[name, image_url], ...] for each picture.

    For every thumbnail link on the listing page, fetches the detail page and
    extracts the full-size image URL and its title. Sleeps 1s between requests
    to stay polite to the server.

    Returns an empty list when *html* is empty or the expected layout is missing,
    instead of raising AttributeError as the original did.
    """
    pic_list = []
    if not html:
        return pic_list

    soup = BeautifulSoup(html, "html.parser")
    slist = soup.find(class_="slist")
    if slist is None:
        # Page layout changed, or get_html_text returned an error/empty page.
        return pic_list

    for item in slist.find_all("a"):
        path = item.get("href")  # value of the href attribute
        if not path:
            continue  # anchor without a target — nothing to fetch
        time.sleep(1)
        url = f"https://pic.netbian.com{path}"  # detail page address

        # Fetch the detail page and pull out the full-size image location.
        htm = get_html_text(url)
        soup1 = BeautifulSoup(htm, "html.parser")
        text1 = soup1.find(id="img")
        if text1 is None or text1.img is None:
            continue  # skip entries whose detail page lacks the image container
        pic_path = text1.img.get("src")
        pic_name = text1.img.get("title")
        if not pic_path or not pic_name:
            continue
        pic_url = f"https://pic.netbian.com{pic_path}"
        time.sleep(1)
        pic_list.append([pic_name, pic_url])

    return pic_list


def save_pic(info, filepath):
    """Download every [name, url] pair in *info* into directory *filepath*.

    Each image is saved as "<filepath>/<name>.jpg". A failed download is
    reported and skipped so one bad URL does not abort the whole batch.
    """
    # makedirs handles nested paths and is race-free with exist_ok
    # (os.mkdir failed for "./a/b/" and raced with concurrent runs).
    os.makedirs(filepath, exist_ok=True)

    for name, url in info:
        try:
            # timeout prevents an indefinite hang; raise_for_status stops us
            # from writing an HTML error page to disk as a ".jpg".
            res = requests.get(url, timeout=30)
            res.raise_for_status()
        except requests.RequestException:
            print(f"{name} 下载失败")
            continue

        img_path = f"{filepath}/{name}.jpg"
        with open(img_path, "wb") as f:
            f.write(res.content)
        print(f"{name} 下载成功")
        time.sleep(1)  # polite delay between downloads


if __name__ == '__main__':
    # Entry point: scrape the 4K listing page, collect every picture's
    # full-size URL, then download them all into the local ./pic/ folder.
    start_url = "https://pic.netbian.com/4kmeinv/index.html"
    target_dir = "./pic/"

    listing_html = get_html_text(start_url)
    pictures = get_url(listing_html)
    save_pic(pictures, target_dir)
