# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


import os

# useful for handling different item types with a single interface
from itemadapter import ItemAdapter


class QidianCrawlerPipeline:
    """Default pass-through pipeline: hands every item on unmodified."""

    def process_item(self, item, spider):
        """Return *item* untouched so downstream pipelines can process it."""
        return item


class SaveAsTxtPipeline:
    """Persist crawled books as plain-text files under ``./books/<category>/``."""

    def process_item(self, item, spider):
        """Write the book carried by *item* to disk for ``qidian_spider``.

        Expects *item* to provide ``category``, ``title`` and ``chapters``
        keys, where ``chapters`` is a list of dicts with ``title`` and
        ``content``. Items from other spiders pass through untouched.
        Always returns *item* so later pipelines still run.
        """
        if spider.name == 'qidian_spider':
            # Build the save path: one folder per category under ./books.
            # Single-quoted subscript here — item["category"] inside a
            # double-quoted f-string is a SyntaxError before Python 3.12.
            category_folder = f"./books/{item['category']}"
            # Create the folder if it does not exist yet.
            os.makedirs(category_folder, exist_ok=True)
            # Full output filename, including the path.
            filename = os.path.join(category_folder, f"{item['title']}.txt")
            print(f"开始保存书籍: {filename}")
            # Collect all chapter text first and write it in one call
            # instead of many small writes.
            with open(filename, 'w', encoding='utf-8') as f:
                content = []
                for chapter in item['chapters']:
                    content.append(f"## {chapter['title']}\n\n")
                    content.append(chapter['content'])
                    content.append('\n\n')
                f.write(''.join(content))
            print(f"保存成功: {filename}")

        return item