import scrapy
import requests
from PIL import Image
from io import BytesIO
from pathlib import Path


class ImageSpider(scrapy.Spider):
    """Spider that collects every ``<img src=...>`` on the start page,
    follows each link, and saves the image under ``images/``.

    ``.webp`` files are renamed to ``.png`` on save, which makes Pillow
    convert them to PNG (format is inferred from the file extension).
    """

    name = "image_spider"
    start_urls = ['https://www.2022wyt.com/photo/20231228/54549.html']  # Replace with the URL of the site you want to crawl

    def parse(self, response):
        """Extract all image URLs from the page and schedule a request for each."""
        image_links = response.css('img::attr(src)').extract()
        self.logger.info(f"Found {len(image_links)} image links: {image_links}")

        # Follow each image link; urljoin resolves relative src values.
        for image_link in image_links:
            yield scrapy.Request(response.urljoin(image_link), callback=self.parse_image)

    def parse_image(self, response):
        """Callback for an image request: validate the response and save the bytes."""
        # Skip anything that did not come back as a clean 200.
        if response.status != 200:
            self.logger.error(f"Failed to fetch image: {response.url}. Status code: {response.status}")
            return

        image_url = response.url
        self.logger.info(f"Successfully fetched image: {image_url}")

        # Reuse the bytes Scrapy already downloaded (response.body) instead of
        # re-fetching the URL with requests — avoids a duplicate download and
        # keeps the traffic under Scrapy's throttling/middleware.
        self.download_and_convert_image(image_url, content=response.body)

    def download_and_convert_image(self, image_url, content=None):
        """Decode image bytes and save them under ``images/``.

        Args:
            image_url: URL of the image; its last path segment becomes the filename.
            content: Raw image bytes. If ``None`` (backward-compatible default),
                the image is downloaded with ``requests``.
        """
        if content is None:
            # Fallback download path; timeout prevents an indefinite hang.
            image_response = requests.get(image_url, timeout=30)
            if image_response.status_code != 200:
                self.logger.error(f"Failed to download image: {image_url}. Status code: {image_response.status_code}")
                return
            content = image_response.content

        # Decode the bytes into a PIL Image object.
        try:
            image = Image.open(BytesIO(content))
        except Exception as e:
            self.logger.error(f"Failed to convert image: {image_url}. Error: {e}")
            return

        # Derive a safe filename: drop any query string and guard against
        # URLs that end with '/' (empty last segment).
        image_name = image_url.split("/")[-1].split("?")[0] or "image"
        save_path = f"images/{image_name.replace('.webp', '.png')}"  # Convert the image to PNG format

        # Make sure the output directory exists.
        Path("images").mkdir(parents=True, exist_ok=True)

        # Pillow picks the output format from the file extension.
        image.save(save_path)

        self.logger.info(f"Image downloaded and converted: {save_path}")
