# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import os
import re

from scrapy.pipelines.images import ImagesPipeline

from imagedownload.settings import IMAGES_STORE


class ImagedownloadPipeline(ImagesPipeline):
    """Images pipeline that stores each item's images in a sub-directory
    named after the item's (sanitised) ``title`` field."""

    # Characters illegal in Windows file names, plus [ ] # which are
    # problematic in paths/URLs.  NOTE(fix): the original pattern
    # r'[\\/:\*\?"<>\|[]#]' closed the character class at the inner
    # unescaped ']', so '#' and ']' were matched only as the literal
    # sequence '#]' after a bad character, not stripped individually.
    _ILLEGAL_CHARS = re.compile(r'[\\/:*?"<>|\[\]#]')

    def get_media_requests(self, item, info):
        """Attach the originating item to every image request so that
        file_path() can read the item's title later."""
        media_requests = super(ImagedownloadPipeline, self).get_media_requests(item, info)
        for request in media_requests:
            request.item = item
        return media_requests

    def file_path(self, request, response=None, info=None):
        """Return the storage path for one image, relative to IMAGES_STORE:
        ``<sanitised title>/<sha1 hash>.<ext>``.

        Scrapy's FilesStore joins this return value onto its base
        directory itself, so IMAGES_STORE must NOT be part of the
        returned path (the original joined it in, which duplicated the
        store directory when IMAGES_STORE was a relative path).
        """
        # Default path from the parent pipeline, e.g. 'full/<sha1>.jpg'.
        # Forward the real response/info instead of hard-coding None.
        origin_path = super(ImagedownloadPipeline, self).file_path(
            request, response=response, info=info)
        title = request.item['title']
        # Strip characters that are invalid in file/directory names.
        new_title = self._ILLEGAL_CHARS.sub('', title)
        save_path = os.path.join(IMAGES_STORE, new_title)
        # Create the per-title directory up front; exist_ok avoids the
        # check-then-create race of os.path.exists() + os.mkdir().
        os.makedirs(save_path, exist_ok=True)

        # Drop the default 'full/' prefix so the image lands directly in
        # the per-title directory.
        image_name = origin_path.replace('full/', '')
        return os.path.join(new_title, image_name)