# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
import os
import urllib.request


class GzSpiderPipeline:
    """Persist every scraped item via its Django-style ``save`` method."""

    def process_item(self, item, spider):
        # Write the item to the database, then pass it along unchanged
        # so any later pipeline stage can keep processing it.
        item.save()
        return item


class ShenzhenPipeline:
    """Persist scraped items and download their images into per-title folders.

    Expects items exposing ``get('title')``, ``get('image_urls')`` (an
    iterable of ``(url, filename)`` pairs) and a Django-style ``save()``.
    """

    def __init__(self):
        # Root folder for downloaded images, a sibling of this module's
        # package directory.
        self.image_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'images')
        # makedirs(exist_ok=True) replaces the exists()-then-mkdir pattern:
        # it is race-free and also creates any missing parent directories.
        os.makedirs(self.image_path, exist_ok=True)

    def process_item(self, item, spider):
        # Persist the item to the database first so a failed image
        # download never loses the record itself.
        item.save()

        title = item.get('title')
        image_urls = item.get('image_urls')

        # One sub-folder per item title; fall back to a fixed name so a
        # missing/None title cannot crash os.path.join.
        category_path = os.path.join(self.image_path, title or 'uncategorized')
        os.makedirs(category_path, exist_ok=True)

        # Download each (url, filename) pair. One bad URL must not abort
        # the whole pipeline, so failures are logged and skipped.
        # OSError covers urllib.error.URLError (its subclass) plus disk errors.
        if image_urls:
            for image_url in image_urls:
                try:
                    urllib.request.urlretrieve(url=image_url[0], filename=os.path.join(category_path, image_url[1]))
                except OSError:
                    if spider is not None:
                        spider.logger.warning('Failed to download image %s', image_url[0])

        return item
