# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter


class BookPipeline:
    """Append each scraped book to ``mybooks.txt`` as ``name;price;img_url``.

    The file handle is opened once per crawl (in ``open_spider``) and closed
    when the spider finishes, instead of re-opening the file for every item.
    """

    def open_spider(self, spider):
        # "a" keeps results from previous crawls; utf-8 for non-ASCII titles.
        self.file = open("mybooks.txt", "a", encoding="utf-8")

    def close_spider(self, spider):
        self.file.close()

    def process_item(self, item, spider):
        # str() guards against non-string field values (e.g. a numeric
        # price), which would raise TypeError under plain "+" concatenation.
        line = ";".join(str(item[key]) for key in ("name", "price", "img_url"))
        self.file.write(line + "\n")
        return item

from scrapy.pipelines.images import ImagesPipeline #pipeline base class for downloading images
from scrapy import Request
from scrapy.exceptions import DropItem #exception used to discard an item
import logging #log management

logger = logging.getLogger("SaveImagePipeline")
#Image-download pipeline, inherits from ImagesPipeline
class SaveImagePipeline(ImagesPipeline):
    """Download the image referenced by ``item["img_url"]``.

    Subclasses Scrapy's ImagesPipeline: requests the image, drops the item
    if the download failed, and stores the file under its URL basename.
    """

    def get_media_requests(self, item, info):
        # Schedule the item's single image URL for download.
        yield Request(url=item["img_url"])

    # Optional override: validate the download result.
    def item_completed(self, results, item, info):
        # ``results`` is a list of (success, info_or_failure) tuples.
        # Check every tuple, not just results[0]: this catches any failed
        # download and avoids an IndexError when ``results`` is empty.
        if not all(ok for ok, _ in results):
            raise DropItem("下载失败")
        # Log success at debug level.
        logger.debug("下载成功")
        return item

    # Override the storage path inside IMAGES_STORE.
    def file_path(self, request, response=None, info=None, *, item=None):
        # Use the last URL path segment as the file name.
        return request.url.split("/")[-1]