import requests
from bs4 import BeautifulSoup
import os
from urllib.parse import urljoin
import time

def download_images(url="https://www.yn.gov.cn/", save_dir=r"d:/io/img"):
    """Download every image referenced by <img> tags on *url* into *save_dir*.

    Args:
        url: Page to scrape for image tags (defaults to the original target).
        save_dir: Directory where images are written; created if missing.

    Files are named ``image_<index><ext>`` where ``<ext>`` is taken from the
    image URL, falling back to ``.jpg`` when the URL has no extension.
    Individual download failures are logged and skipped; a failure to fetch
    the page itself is logged and aborts the run.
    """
    # makedirs handles missing parent directories and exist_ok avoids the
    # check-then-create race that os.path.exists + os.mkdir had.
    os.makedirs(save_dir, exist_ok=True)
    headers = {
        'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36 Edg/133.0.0.0"
    }
    try:
        # timeout prevents a hung server from blocking the script forever.
        response = requests.get(url, headers=headers, timeout=10)
        response.raise_for_status()
        response.encoding = "utf-8"
        soup = BeautifulSoup(response.text, "html.parser")
        # find_all is the PEP 8-style name; findAll is a deprecated alias.
        img_tags = soup.find_all('img')
        for i, img in enumerate(img_tags):
            img_url = img.get('src')
            if not img_url:
                # Skip <img> tags that have no src attribute.
                continue
            # Resolve relative links against the page URL.
            img_url = urljoin(url, img_url)
            # Keep the URL's extension; fall back to .jpg when absent.
            img_ext = os.path.splitext(img_url)[1]
            if not img_ext:
                img_ext = ".jpg"
            file_path = os.path.join(save_dir, f'image_{i}{img_ext}')
            try:
                # Download the image itself.
                img_response = requests.get(img_url, headers=headers, timeout=10)
                img_response.raise_for_status()
                with open(file_path, 'wb') as f:
                    f.write(img_response.content)
                # Report the path actually written (the original printed a
                # hard-coded .jpg name that could differ from the saved file).
                print(f"成功下载图片: {file_path}")
                time.sleep(0.5)  # be polite: throttle requests to the server
            except Exception as e:
                # Log and move on to the next image instead of failing silently.
                print(f"下载图片失败: {img_url} ({e})")
                continue
    except Exception as e:
        print(f"访问网站失败: {str(e)}")


# Script entry point: run the scraper only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    download_images()