import requests
from bs4 import BeautifulSoup
import os

# Target number of images to download; adjust as needed.
target_image_count = 350
# Base URL of the listing page; a page number is appended below to build
# each paginated URL. The commented-out lines are previously used categories.
#base_url = "https://www.vcg.com/creative-image/jiayangsongshu/"
#base_url = "https://www.vcg.com/creative-image/chongwuji/"
#base_url = "https://www.vcg.com/creative-image/chongwuya/"
#base_url = "https://www.vcg.com/creative-image/chongwuzhu/"
base_url = "https://www.vcg.com/creative-image/jiayangzhu/"



#base_url = "https://www.vcg.com/creative-image/chongwucangshu/"
#base_url = "https://www.vcg.com/creative-image/jiayangtuzi/"

# Task note: crawl ~300 images (resolution > 720p) for each of the
# following categories:
# 1. pet rabbit
# 2. pet hamster
# 3. pet squirrel
# 4. pet chicken
# 5. pet duck
# 6. pet pig


# Local directory where downloaded images are saved; adjust per category.
#save_dir = "data_pet_songshu"
#save_dir = "data_pet_ji"
#save_dir = "data_pet_ya"
#save_dir = "data_pet_zhu"
save_dir = "data_pet_zhu2"



#save_dir = "chongwucangshu"
#save_dir = "jiayangtuzi"



# Number of images downloaded so far (updated by the crawl loop).
current_image_count = 0
# Browser-like request headers; a realistic User-Agent helps avoid being
# served a bot-blocked page.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36"
}


def _extract_image_urls(html):
    """Return absolute https image URLs found in a listing page's HTML.

    The site lazy-loads thumbnails via <img class="lazyload_hk"
    data-src="//vcg...">; data-src holds a protocol-relative URL, so the
    "https:" scheme is prepended to form a fetchable URL.
    """
    soup = BeautifulSoup(html, 'html.parser')
    img_tags = soup.find_all(
        'img',
        class_='lazyload_hk',
        attrs={'data-src': lambda v: v and v.startswith('//vcg')},
    )
    return ["https:" + tag.get('data-src') for tag in img_tags]


def main():
    """Page through the listing and download images until the target count.

    Stops early (instead of looping forever, which the original code did)
    after several consecutive pages yield no images — e.g. once the last
    page is passed or the site starts rejecting requests.
    """
    downloaded = current_image_count
    page_number = 1
    empty_pages = 0  # consecutive pages that produced no image URLs

    # exist_ok avoids the exists()/makedirs() race; hoisted out of the loop.
    os.makedirs(save_dir, exist_ok=True)

    while downloaded < target_image_count:
        url = f"{base_url}?page={page_number}"
        try:
            # timeout prevents a stalled connection from hanging the crawl.
            response = requests.get(url, headers=headers, timeout=30)
            response.raise_for_status()
            image_urls = _extract_image_urls(response.text)
        except requests.RequestException as e:
            print(f"获取网页 {page_number} 内容出现错误:", e)
            image_urls = []

        # BUGFIX: the original incremented the page number forever when no
        # images were found; bail out after a few consecutive empty pages.
        if not image_urls:
            empty_pages += 1
            if empty_pages >= 3:
                print("连续多页未找到图片，提前结束。")
                break
        else:
            empty_pages = 0

        for image_url in image_urls:
            if downloaded >= target_image_count:
                break
            try:
                # Send the browser-like headers here too: image CDNs often
                # reject requests without a User-Agent.
                img_response = requests.get(image_url, headers=headers, timeout=30)
                img_response.raise_for_status()
                # Name files by running index; adjust if URL-derived names
                # are preferred.
                save_path = os.path.join(save_dir, f"image_{downloaded + 1}.jpg")
                with open(save_path, 'wb') as f:
                    f.write(img_response.content)
                print(f"图片 {downloaded + 1} 下载成功，已保存至 {save_path}")
                downloaded += 1
            except requests.RequestException as e:
                print(f"下载图片 {downloaded + 1} 时出现错误: {e}")

        page_number += 1

    # Message no longer claims the target was reached: the early-exit path
    # above can finish with fewer images than requested.
    print(f"已成功下载 {downloaded} 张图片。")


if __name__ == "__main__":
    main()