# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
import os

from itemadapter import ItemAdapter


class FiveprojectPipeline:
    """Scrapy item pipeline that saves scraped page content to disk.

    ``open_spider`` builds the output directory tree once per crawl;
    ``process_item`` writes each item's ``content`` to a file named by
    its ``name`` key inside that tree.
    """

    def open_spider(self, spider):
        """Create the output directory layout before the crawl starts.

        Layout (relative to the current working directory):
            scrapy前端/
                _static/js/
                _static/css/
                intro/
                topics/
        """
        self.save_path = os.path.join(os.getcwd(), "scrapy前端")
        static_path = os.path.join(self.save_path, "_static")
        # makedirs(..., exist_ok=True) also creates intermediate parents,
        # so only the leaf directories need explicit calls and there is no
        # exists()/makedirs() race.
        for path in (
            os.path.join(static_path, "js"),
            os.path.join(static_path, "css"),
            os.path.join(self.save_path, "intro"),
            os.path.join(self.save_path, "topics"),
        ):
            os.makedirs(path, exist_ok=True)

    def process_item(self, item, spider):
        """Write the item's ``content`` to ``save_path/name``.

        Returns the item unchanged so that any later pipelines still
        receive it (the original implicitly returned None, which makes
        Scrapy drop the item for all subsequent pipelines).
        """
        print(self.save_path)
        name = item.get("name")
        content = item.get("content")
        with open(os.path.join(self.save_path, name), "w", encoding="utf-8") as f:
            f.write(content)
            print("保存%s文件成功" % name)
        return item
