# Image downloader
import requests
from bs4 import BeautifulSoup
import os
from urllib.parse import urljoin

# Create the output directory; exist_ok=True means do not raise if it already exists.
os.makedirs("F:\\python_workspace\\imgs", exist_ok=True)
# Request headers that mimic a browser visit (some sites reject the default
# python-requests User-Agent).
# NOTE(review): the original comment marked this file as deprecated/obsolete.
headers = {
    'User-Agent': 'Mozilla/5.0'
}
try:
    # Fetch the gallery page and parse every <img> tag out of it.
    base_url = "https://www.cnphotos.net/"
    response = requests.get("https://www.cnphotos.net/photographs/show-1564.html", headers=headers, timeout=10)
    response.raise_for_status()
    response.encoding = "utf-8"
    soup = BeautifulSoup(response.text, "html.parser")
    img_list = soup.find_all('img')

    downloaded_urls = set()  # remember resolved URLs so each image is fetched once
    for idx, img in enumerate(img_list):
        # Prefer the normal 'src'; some lazy-loaded images keep the real URL
        # in 'default_image_url' instead.
        img_url = img.get('src') or img.get('default_image_url')
        # BUGFIX: skip tags with neither attribute — the original fell through
        # with img_url = None and urljoin(base_url, None) raised TypeError.
        if not img_url:
            continue
        if img_url.startswith('//'):
            # Protocol-relative URL: prepend a scheme so it resolves.
            img_url = 'https:' + img_url
        # Resolve relative paths against the site root.
        full_img_url = urljoin(base_url, img_url)
        # De-duplicate to avoid downloading the same image twice.
        if full_img_url in downloaded_urls:
            continue
        downloaded_urls.add(full_img_url)

        img_name = img.get('alt', '').strip()

        try:
            # Single request fetches both the bytes and the Content-Type
            # (used below to pick the file extension).
            img_response = requests.get(full_img_url, headers=headers, timeout=10)
            img_response.raise_for_status()
            content_type = img_response.headers.get('Content-Type', '')
            ext = '.jpg'  # default when the server reports jpeg or nothing useful
            if 'image/png' in content_type:
                ext = '.png'
            elif 'image/gif' in content_type:
                ext = '.gif'
            # Keep only filesystem-safe characters from the alt text.
            safe_alt = "".join(c for c in img_name if c.isalnum() or c in " _-").rstrip()
            # BUGFIX: images without usable alt text all collapsed to the name
            # ".jpg" and overwrote each other — fall back to an indexed name.
            if not safe_alt:
                safe_alt = f"img_{idx}"
            safe_img_name = f"{safe_alt}{ext}"
            filepath = os.path.join("F:\\python_workspace\\imgs", safe_img_name)
            with open(filepath, 'wb') as file:
                # BUGFIX: reuse the bytes already downloaded above. The original
                # issued a second requests.get(img_url) with the unresolved URL,
                # no headers and no timeout.
                file.write(img_response.content)
            print("%s下载完成" % safe_img_name)
        except Exception as e:
            # Best-effort per image: report and move on to the next one.
            print(f"图片{full_img_url}下载失败: {e}")

except Exception as e:
    print(f"获取网页内容失败: {e}")
    # Install requests:        pip install requests
    # Install BeautifulSoup:   pip install beautifulsoup4 lxml  (HTML parsing)
