# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
from collections.abc import Iterator
from typing import Any

import scrapy
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
from scrapy import Request
from scrapy.http import Response
from scrapy.pipelines.files import FilesPipeline
from scrapy.pipelines.media import MediaPipeline


class Day14Pipeline:
    """Default no-op item pipeline: forwards every item unchanged."""

    def process_item(self, item, spider):
        """Scrapy pipeline hook; returning the item keeps it in the chain."""
        return item

class VideoDownloadPipeline(FilesPipeline):
    """
    FilesPipeline subclass that downloads the video referenced by each
    item and saves it under the item's ``name`` field.

    Only items scraped by the ``pixabay_videos`` spider are handled;
    items from any other spider produce no download requests.
    """

    def get_media_requests(
            self, item: Any, info: MediaPipeline.SpiderInfo
    ) -> Iterator[Request]:
        # NOTE: this is a generator (it yields), so the return type is
        # Iterator[Request], not list[Request] as previously annotated.
        if info.spider.name == 'pixabay_videos':
            # Attach the whole item via meta so file_path() below can
            # recover the target filename for this request.
            yield scrapy.Request(url=item['url'], meta={'item': item})

    def file_path(
            self,
            request: Request,
            response: Response | None = None,
            info: MediaPipeline.SpiderInfo | None = None,
            *,
            item: Any = None,
    ) -> str:
        # The originating item was stashed in request.meta by
        # get_media_requests(), so we look the filename up there rather
        # than deriving it from the URL.
        return request.meta['item']['name']
