# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.pipelines.files import FilesPipeline
from urllib.parse import urlparse
from os.path import basename, dirname, join
from os import mkdir
from matplotlib_examples import settings


class MatplotlibExamplesPipeline(FilesPipeline):
    """Files pipeline that stores each download as ``<category>/<filename>``.

    The category is the last directory component of the source URL's path
    (e.g. ``https://host/examples/lines_bars/plot.py`` is saved as
    ``lines_bars/plot.py``).
    """

    def file_path(self, request, response=None, info=None, *, item=None):
        """Return the storage path for the file fetched by *request*.

        The path returned here is *relative*: FilesPipeline itself prepends
        the FILES_STORE root, so it must not be joined in again.

        :param request: the download request; only ``request.url`` is used.
        :param response: unused, kept for the FilesPipeline interface.
        :param info: unused, kept for the FilesPipeline interface.
        :param item: the scraped item (passed by Scrapy >= 2.4); unused.
        :returns: ``<last URL directory>/<file name>``.
        """
        # Split the URL path into its category directory and file name.
        path = urlparse(request.url).path
        # basename(dirname(path)) -> last directory component (the category);
        # basename(path) -> the file name itself.
        return join(basename(dirname(path)), basename(path))