# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import json
import os
import urllib.request

# useful for handling different item types with a single interface
from itemadapter import ItemAdapter


# Pipelines only run when enabled in settings.py via ITEM_PIPELINES.
class ScrapyStudy04DangdangPipeline:
    """Write every scraped book item to ``book.json``, one JSON object per line.

    The file handle is opened once in ``open_spider`` and closed in
    ``close_spider`` — far cheaper than reopening the file for every item.
    """

    # Called once before the spider starts crawling.
    def open_spider(self, spider):
        self.fp = open("book.json", "w", encoding="utf-8")

    # item: the book object yielded by the spider.
    def process_item(self, item, spider):
        # write() requires a string. Serialize the item as real JSON:
        # str(item) would emit a Python repr, which is NOT valid JSON
        # even though the output file is named book.json.
        # ensure_ascii=False keeps Chinese titles human-readable.
        self.fp.write(json.dumps(dict(item), ensure_ascii=False) + "\n")
        return item

    # Called once after the spider finishes.
    def close_spider(self, spider):
        self.fp.close()


# Running multiple pipelines:
# (1) define the extra pipeline class
# (2) enable it in settings.py via ITEM_PIPELINES
class ScrapyDangdangDownloadPipeline:
    """Download each item's cover image to ``./books/<title>.jpg``."""

    # Same process_item contract as the pipeline above: receive the item,
    # perform a side effect, and pass the item on to the next pipeline.
    def process_item(self, item, spider):
        url = item.get("img_url")
        # urlretrieve does not create missing directories — make sure the
        # target folder exists before downloading.
        os.makedirs("./books", exist_ok=True)
        # Strip path separators from the title so it is a safe filename
        # (a '/' in a book title would otherwise break the target path).
        safe_title = item.get("title").replace("/", "_").replace("\\", "_")
        filename = "./books/" + safe_title + ".jpg"
        urllib.request.urlretrieve(url, filename)

        return item
