# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import os


class ScrapyWebPipeline:
    """Scrapy item pipeline that persists each item's text to disk.

    Files are written under ``<cwd>/scrapy/``. The item's ``name`` field is
    interpreted as a '/'-separated relative path; any intermediate
    directories are created on demand before the file is written.
    """

    def open_spider(self, spider):
        """Create the root download directory when the spider starts.

        :param spider: the spider being opened (unused).
        """
        # os.path.join instead of string '+' keeps the path portable.
        self.download_path = os.path.join(os.getcwd(), 'scrapy')
        # exist_ok=True avoids the check-then-create race the original
        # os.path.exists() guard had.
        os.makedirs(self.download_path, exist_ok=True)

    def process_item(self, item, spider):
        """Write ``item['text']`` to the file named by ``item['name']``.

        :param item: mapping with at least 'name' (relative '/'-separated
            path) and 'text' (file content) keys.
        :param spider: the spider that produced the item (unused).
        :returns: the item unchanged, so later pipelines can process it.
        """
        # NOTE(review): item['name'] comes from scraped data; a value
        # containing '..' could escape the download root — consider
        # sanitizing it upstream.
        parts = str(item['name']).split('/')
        target = os.path.join(self.download_path, *parts)
        # One call creates every missing intermediate directory — replaces
        # the original component-by-component while loop.
        os.makedirs(os.path.dirname(target), exist_ok=True)
        print('正在存贮到...' + target)
        with open(target, 'w', encoding='utf-8') as f:
            f.write(item['text'])
        return item
