# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
import os
from urllib import request
from scrapy.pipelines.images import ImagesPipeline
from bmw import settings

class BmwPipeline:
    """Pipeline that downloads each item's images into a per-title folder
    under an ``images`` directory located next to the project package."""

    def __init__(self):
        # Directory containing this module, then its parent directory:
        # the images folder lives one level above the package.
        pkg_dir = os.path.dirname(__file__)
        project_dir = os.path.dirname(pkg_dir)
        self.path = os.path.join(project_dir, 'images')
        # makedirs(exist_ok=True) avoids the check-then-create race of
        # os.path.exists + os.mkdir and also creates any missing parents.
        os.makedirs(self.path, exist_ok=True)

    def process_item(self, item, spider):
        """Download every URL in ``item['img_urls']`` into a sub-folder
        named after ``item['title']`` and return the item unchanged.

        NOTE(review): urlretrieve is a legacy, blocking interface; each
        URL is fetched synchronously inside the pipeline.
        """
        title = item['title']
        img_urls = item['img_urls']

        title_path = os.path.join(self.path, title)
        os.makedirs(title_path, exist_ok=True)
        for img_url in img_urls:
            # File name is everything after the last underscore in the URL.
            img_name = img_url.split('_')[-1]
            request.urlretrieve(img_url, os.path.join(title_path, img_name))

        return item

class BMWImagesPipeline(ImagesPipeline):
    """ImagesPipeline subclass that stores downloaded images in
    per-title sub-folders of IMAGES_STORE instead of the default flat
    ``full/`` directory."""

    def get_media_requests(self, item, info):
        """Attach the item to every media request so that ``file_path``
        can read the item's title later."""
        request_objs = super(BMWImagesPipeline, self).get_media_requests(item, info)
        for request_obj in request_objs:
            # This Scrapy version does not forward the item to
            # file_path, so stash it on the request object itself.
            request_obj.item = item
        return request_objs

    def file_path(self, request, response=None, info=None):
        """Return ``<title>/<image name>`` relative to IMAGES_STORE for
        the image being stored."""
        path = super(BMWImagesPipeline, self).file_path(request, response, info)
        title = request.item.get('title')

        images_store = settings.IMAGES_STORE
        title_path = os.path.join(images_store, title)

        # makedirs(exist_ok=True) fixes two problems with the previous
        # exists()+mkdir(): the check-then-create race, and a
        # FileNotFoundError when IMAGES_STORE itself has not been
        # created yet (os.mkdir cannot create missing parents).
        os.makedirs(title_path, exist_ok=True)

        # Default path is 'full/<sha1>.jpg'; drop the 'full/' prefix so
        # the file lands directly inside the title folder.
        image_name = path.replace('full/', '')
        image_path = os.path.join(title_path, image_name)

        return image_path


