# -*- coding: utf-8 -*-

from scrapy.pipelines.images import ImagesPipeline
from fangtianxia_scrapy.settings import IMAGES_STORE
from time import sleep
import scrapy
import os

class FangtianxiaScrapyImagePipeline(ImagesPipeline):
    """Images pipeline that stores each downloaded image under a
    per-category sub-directory of ``IMAGES_STORE``.

    The item is expected to provide ``image_urls`` (scheme-relative
    URLs, ``//host/path``) and a ``category`` string.
    """

    def get_media_requests(self, item, info):
        """Yield one download request per image URL.

        The item's category travels in ``request.meta`` so that
        :meth:`file_path` can place the file in the right folder.

        BUG FIX: the original zipped ``image_urls`` against the
        one-element list ``[category]``, so only the FIRST URL was ever
        requested and every other image was silently dropped.  Every
        URL is now paired with the same category.
        """
        category = item.get('category')
        for url in item.get('image_urls') or []:
            print('这是pipelines解析url:', url)
            print('这是pipelins解析分类:', category)
            # URLs arrive scheme-relative ('//...'); prepend the scheme.
            yield scrapy.Request('https:' + url, meta={'category': category})

    def file_path(self, request, response=None, info=None):
        """Return the storage path for a downloaded image.

        The path is ``<IMAGES_STORE>/<category>/<basename-of-url>``,
        creating the category directory on first use.

        BUG FIX: the original condition was inverted
        (``if os.path.exists(cate_path): os.mkdir(cate_path)``) — it
        only tried to create the directory when it ALREADY existed
        (which would raise ``FileExistsError``) and never created a
        missing one.  ``os.makedirs(..., exist_ok=True)`` is idempotent.
        """
        print('这是file_path_url:', request.url)
        print('这是file_path_category:', request.meta)

        # meta always carries 'category' (set in get_media_requests);
        # default to '' so a missing key cannot raise AttributeError.
        category = (request.meta.get('category') or '').strip()
        print('这是file_path解析category:', category)
        print('开始创建分类文件夹...')
        cate_path = os.path.join(IMAGES_STORE, category)
        os.makedirs(cate_path, exist_ok=True)

        print('分类创建完毕...')
        print('开始构造imgs路径')
        img_name = request.url.split('/')[-1]
        img_path = os.path.join(cate_path, img_name)
        print('开始存储imgs...')
        print('*' * 50)
        # NOTE(review): Scrapy expects file_path() to return a path
        # RELATIVE to IMAGES_STORE; joining IMAGES_STORE here only works
        # when IMAGES_STORE is an absolute path (os.path.join then drops
        # the store base on re-join) — confirm against settings.
        return img_path

