# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
import os

from itemadapter import ItemAdapter


class XiaoshuoPipeline:
    """Scrapy item pipeline that saves each novel chapter to its own .txt file.

    Lifecycle hooks (called by Scrapy, not by user code):
      * ``open_spider``  -- runs exactly once when the spider starts.
      * ``process_item`` -- runs once per item yielded by the spider.
      * ``close_spider`` -- runs exactly once when the spider finishes.
    """

    # Directory the chapter files are written to.  Hoisted to a class
    # attribute (same default path as before) so it is overridable in
    # tests/settings instead of being buried inside the open() call.
    SAVE_DIR = r"D:\六星教育\2203期\2022_3_python\24.crawl_spider使用\xiaoshuo\xiaoshuo\万古神帝"

    def open_spider(self, spider):
        """Run once at spider start-up: ensure the output directory exists."""
        print("测试1")
        # exist_ok=True makes this idempotent -- plain os.mkdir() raises
        # FileExistsError on re-runs, which is why it was commented out
        # originally (and then chapters failed to save on fresh machines).
        os.makedirs(self.SAVE_DIR, exist_ok=True)

    def process_item(self, item, spider):
        """Write one chapter to disk.

        ``item['title']`` becomes the file name; ``item['contents']`` is
        expected to be an iterable of text fragments that are joined into
        the file body.  Returns the item unchanged so any later pipelines
        keep receiving it.
        """
        path = os.path.join(self.SAVE_DIR, f"{item['title']}.txt")
        with open(path, "w", encoding="utf-8") as file1:
            # Join the scraped fragments so the saved novel reads cleanly.
            content = "".join(item["contents"])
            file1.write(content)
        return item

    def close_spider(self, spider):
        """Run once just before the spider shuts down."""
        print("测试2")
