# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


import json

# useful for handling different item types with a single interface
from itemadapter import ItemAdapter

# To use a pipeline, it must be enabled in ITEM_PIPELINES in settings.py.
class ScrapyDangdangPipeline:
    """Write every scraped item to ``book.json``, one JSON object per line.

    Must be enabled via the ITEM_PIPELINES setting in settings.py.
    The file handle is opened once per crawl (open_spider) and closed once
    (close_spider), instead of reopening the file for every item.
    """

    def open_spider(self, spider):
        # Called once before the spider starts: open the output file.
        self.fp = open('book.json', 'w', encoding='utf-8')
        print('++++++++++++++++++++++++++++++++++++')

    def process_item(self, item, spider):
        # Serialize the item as real JSON. The previous str(item) wrote a
        # Python repr, which made book.json invalid JSON and impossible to
        # parse downstream. ensure_ascii=False keeps non-ASCII (Chinese)
        # text readable; one object per line gives a JSON Lines file.
        self.fp.write(json.dumps(dict(item), ensure_ascii=False) + '\n')
        # Pipelines must return the item so later pipelines receive it.
        return item

    def close_spider(self, spider):
        # Called once after the spider finishes: release the file handle.
        self.fp.close()
        print('-------------------------------------')



import urllib.request
# Running multiple pipelines:
# 1. Define a custom pipeline class.
# 2. Enable the custom class in ITEM_PIPELINES in settings.py.
class DangdangDownloadPipeline:
    """Second pipeline stage: download each book's cover image to ./books/.

    The actual download call is intentionally commented out (slow network);
    the pipeline currently only builds and logs the target filename.
    """

    def process_item(self, item, spider):
        img = item.get('img')
        name = item.get('name')
        # Guard: 'http:' + None raised TypeError when a field was missing.
        # Pass such items through unchanged instead of crashing the crawl.
        if not img or not name:
            return item

        url = 'http:' + img
        filename = './books/' + name + '.jpg'
        print("DangdangDownloadPipeline.process_item", filename)
        # Download disabled for now (poor network); re-enable when needed.
        # NOTE(review): './books/' must already exist for urlretrieve to work.
        # urllib.request.urlretrieve(url=url, filename=filename)
        return item

