# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
import json
from scrapy import Request
from scrapy.exceptions import DropItem
from scrapy.pipelines.images import ImagesPipeline
from scrapy import Request
import os, time


# In ITEM_PIPELINES, the pipeline class with the higher priority processes the
# item first and then passes it on to the next class in the chain.
# If you define a custom images pipeline (like the class below), register the
# custom class name in ITEM_PIPELINES instead of
# scrapy.pipelines.images.ImagesPipeline.
class ImagePipesline(ImagesPipeline):
    """Image pipeline that downloads the images attached to PicItem items.

    NOTE: the class name keeps its original (misspelled) form because it is
    referenced by this exact name from the ITEM_PIPELINES setting.
    """

    def get_media_requests(self, item, info):
        """Yield one download Request per image URL of a PicItem."""
        fields = dict(item)
        # Only PicItem entries carry images; everything else yields nothing.
        if fields['itemID'] != 'PicItem':
            return
        name = fields['id']
        print(f"正在下载{name}")
        for url in fields['image_urls']:
            # Carry the item id along so downstream hooks can identify it.
            yield Request(url, meta={'name': name})

    def file_path(self, request, response=None, info=None, *, item=None):
        """Store each image under the last path segment of its URL."""
        return request.url.rsplit('/', 1)[-1]

    def item_completed(self, results, item, info):
        # Pass the item unchanged to the next pipeline in the chain.
        return item


class WallhavenPipeline:
    """Pipeline that appends metadata items to a JSON-lines-style file and
    reports total crawl duration when the spider closes."""

    def __init__(self):
        # ensure_ascii=False below writes raw non-ASCII characters, so the
        # file must be opened with an explicit UTF-8 encoding.
        self.f = open('abc.json', 'w', encoding='utf-8')
        self.name = ""
        self.data = ""
        self.url = ""
        # Wall-clock reference for the elapsed-time report in close_spider.
        self.startTime = time.perf_counter()
        self.endTime = 0

    def process_item(self, item, spider):
        """Persist metadata items ('item') as JSON; pass every item through.

        PicItem entries are handled by the images pipeline, so they are
        written nowhere here — they are simply returned for the next stage.
        """
        data = dict(item)
        if data['itemID'] == 'item':
            item_id = data['id']  # avoid shadowing the builtin `id`
            print(f"{item_id}下载完成")
            content = json.dumps(data, ensure_ascii=False) + ",\n"
            self.f.write(content)
        return item

    def close_spider(self, spider):
        """Report elapsed time and release the output file."""
        self.endTime = time.perf_counter()
        # BUGFIX: the original `delta = self.startTime = self.endTime` was a
        # chained assignment that overwrote startTime and printed the raw
        # timestamp; the elapsed time is end minus start.
        delta = self.endTime - self.startTime
        print(f"总共用时{delta}秒")
        self.f.close()
