# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
import json
import os

from itemadapter import ItemAdapter

# To use a pipeline it must be enabled in settings.py:
# ITEM_PIPELINES = {
#    "scrapy_dangdang_095.pipelines.ScrapyDangdang095Pipeline": 300,
# }

class ScrapyDangdang095Pipeline:
    """Collect every scraped book item into ``book.json`` (one JSON object per line).

    A single file handle is opened when the spider starts and closed when it
    finishes, avoiding an open/close per item.
    """

    # Name and signature are fixed by Scrapy; called once before the spider starts.
    def open_spider(self, spider):
        print('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
        # 'w' truncates any previous run's output; one shared handle for all items.
        self.fp = open('book.json', 'w', encoding='utf-8')

    # item is the book object yielded by the spider.
    def process_item(self, item, spider):
        # Serialize as real JSON (JSON Lines format). The previous
        # ``str(item)`` wrote a Python repr, which is not valid JSON despite
        # the .json filename. ensure_ascii=False keeps Chinese titles readable.
        self.fp.write(json.dumps(dict(item), ensure_ascii=False) + '\n')
        # Returning the item passes it on to lower-priority pipelines.
        return item

    # Name and signature are fixed by Scrapy; called once after the spider finishes.
    def close_spider(self, spider):
        print('--------------------------------------------------------------------------------------')
        self.fp.close()



import urllib.request

# Enabling multiple pipelines:
# (1) define the pipeline class
# (2) enable it in settings.py

# ITEM_PIPELINES = {
#    # Any number of pipelines may be enabled; priority range is 1-1000,
#    # and a lower value means a higher priority.
#    "scrapy_dangdang_095.pipelines.ScrapyDangdang095Pipeline": 300,
#
#    # enable DangDangDownlosdPipeline
#    "scrapy_dangdang_095.pipelines.DangDangDownlosdPipeline":301
# }
class DangDangDownlosdPipeline:
    """Secondary pipeline (priority 301) that downloads each book's cover image.

    NOTE(review): the class name keeps the original misspelling ("Downlosd")
    because settings.py enables the pipeline by this exact dotted name.
    """

    def process_item(self, item, spider):
        # 'src' is a protocol-relative URL (//img...); prepend the scheme.
        url = 'http:' + item.get('src')
        # Create the target directory if needed — urlretrieve does not create
        # parent directories and would raise FileNotFoundError otherwise.
        os.makedirs('./books', exist_ok=True)
        # Bug fix: the original './books' + name lacked the path separator,
        # dumping files named "books<name>.jpg" into the cwd instead of ./books/.
        filename = './books/' + item.get('name') + '.jpg'
        urllib.request.urlretrieve(url=url, filename=filename)
        # Pass the item on to any lower-priority pipelines.
        return item
