# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter

import json

# Module-level buffer for scraped items, serialized to JSON on spider close.
# NOTE(review): mutable module-level state is shared across spider runs in the
# same process — prefer storing the list on the pipeline instance instead.
item_list = []


# Enable this pipeline via the ITEM_PIPELINES setting in settings.py
class Scrapy02DangdangPipeline:
    """Collect every yielded item and dump them all to ``book.json``.

    Items are buffered in memory while the spider runs and written out as a
    single JSON array when the spider closes.
    """

    def open_spider(self, spider):
        """Open the output file and reset the item buffer.

        The buffer lives on the instance (not at module level) so that
        repeated runs in one process, or multiple spiders, cannot mix or
        accumulate stale data.
        """
        self.items = []
        # 'w' is enough — the file is only written, never read back.
        self.fp = open('book.json', 'w', encoding='utf-8')

    def process_item(self, item, spider):
        """Buffer the src/name/price fields of one item; pass the item on.

        Returning the item lets later pipelines (e.g. the image downloader)
        process it as well.
        """
        self.items.append({
            'src': item.get('src'),
            'name': item.get('name'),
            'price': item.get('price'),
        })
        return item

    def close_spider(self, spider):
        """Serialize the buffered items as one JSON array and close the file."""
        # ensure_ascii=False keeps non-ASCII book titles human-readable.
        json.dump(self.items, self.fp, ensure_ascii=False)
        self.fp.close()


import urllib.request


# Second pipeline stage — also register it in ITEM_PIPELINES in settings.py
class DangdangDownloadPipeline:
    """Derive the cover-image URL and local file name for each item.

    The actual download call is currently disabled (commented out), so this
    stage only computes the values and forwards the item unchanged.
    """

    def process_item(self, item, spider):
        # Scraped ``src`` values are protocol-relative, e.g.
        # //img3m2.ddimg.cn/46/8/29124262-1_b_5.jpg — prepend the scheme.
        image_url = f"https:{item.get('src')}"
        target_path = f"./books/{item.get('name')}.jpg"

        # Download intentionally left disabled:
        # urllib.request.urlretrieve(image_url, filename=target_path)

        # Forward the item so any later pipeline can still see it.
        return item
