# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import csv
import json

import redis

#
# class ProducerPipeline:
#     def open_spider(self, spider):
#         if spider.name == "prodocter":
#             self.client = redis.Redis()
#             self.client.delete("task")
#
#
#     def process_item(self, item, spider):
#         # print(item, type(item))
#         if spider.name == "prodocter":
#             self.client.lpush("task",json.dumps({
#                 "url": "https://httpbin.org/post",
#                 "meta":{
#
#                 },
#                 "data": item,
#                 "method":"POST"
#             },ensure_ascii= False))
#             return item
#
#     def close_spider(self, spider):
#         if spider.name == "prodocter":
#             self.client.close()
import pymongo
from itemadapter import ItemAdapter



import pymongo
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter

# class MySqlPipeline:
#     def open_spider(self, spider):
#         pass
#
#     def process_item(self, item, spider):
#         # print(item, type(item))
#         print(item['author'], item['text'], item['tags'])
#         return item
#
#     def close_spider(self, spider):
#         pass
#
# class CSVPipeline:
#     def open_spider(self, spider):
#         print(f"open  CSVPipeline ")
#         if spider.name == "quotes_all_page":
#             self.start_id = 100
#             self.f = open(f"{spider.name}.csv", "w", newline="")
#             self.csv_writer = csv.writer(self.f)
#             self.csv_writer.writerow(["id", "author", "text", "tags"])
#
#             self.items = []
#
#     def process_item(self, item, spider):
#         if spider.name == "quotes_all_page":
#             self.start_id += 1
#             self.items.append([self.start_id,   item['author'], item['text'], "|".join(item['tags']),  ])
#             if len(self.items) == 15:
#                 self.csv_writer.writerows(self.items)
#                 self.items.clear()
#         return item
#
#     def close_spider(self, spider):
#         if spider.name == "quotes_all_page":
#             if len(self.items) > 0:
#                 self.csv_writer.writerows(self.items)
#                 self.items.clear()
#             self.f.close()
# #
#
#
#
# class TutorialPipeline:
#     def __init__(self):
#         self.client =pymongo.MongoClient('localhost', 27017)
#         self.db = self.client['quotes']
#         self.collection = self.db['quotes']
#         self.items = []
#
#
#     def process_item(self, item, spider):
#         if spider.name == "quotes_pageall":
#             self.items.append(item)
#             if len(self.items) == 15:
#                 self.collection.insert_many(self.items)
#                 self.items.clear()
#             else:
#                 return item
#
#     def close_spider(self, spider):
#         if spider.name == "quotes_pageall" and len(self.items) > 0:
#             self.collection.insert_many(self.items)
#         self.client.close()

#
# import os
# import json
# from itemadapter import ItemAdapter
#
#

# class CSVPipeline:
#     def open_spider(self, spider):
#         # 如果需要CSV导出，可以在这里实现
#         pass
#
#     def close_spider(self, spider):
#         pass
#
#     def process_item(self, item, spider):
#         return item

import csv
import os
from itemadapter import ItemAdapter

import csv
import os
from itemadapter import ItemAdapter


class CsvPipeline:
    """Scrapy item pipeline that writes scraped items to two CSV files.

    Items are dispatched on their keys: an item carrying a ``rank`` key is
    treated as book metadata and appended to ``output/books.csv``; an item
    carrying a ``content`` key is treated as a chapter and appended to
    ``output/chapters.csv``.  Any other item passes through untouched.
    Every item is returned unchanged so later pipelines still receive it.
    """

    # Column order for the two output files; also used as the header rows,
    # so headers and data rows can never drift apart.
    BOOK_COLUMNS = [
        'book_id', 'title', 'author', 'category', 'status',
        'word_count', 'abstract', 'rank', 'read_count', 'update_time'
    ]
    CHAPTER_COLUMNS = [
        'book_id', 'book_title', 'book_author', 'book_category',
        'chapter_title', 'chapter_url', 'chapter_order',
        'content', 'content_length'
    ]

    def open_spider(self, spider):
        """Create the output directory and open both CSV files with headers.

        Raises whatever ``open``/``os.makedirs`` raise; if opening the second
        file fails, the first file handle is closed rather than leaked.
        """
        # exist_ok avoids the check-then-create race of os.path.exists().
        os.makedirs('output', exist_ok=True)

        # utf-8-sig writes a BOM so spreadsheet apps detect the encoding.
        self.books_file = open('output/books.csv', 'w', newline='', encoding='utf-8-sig')
        self.books_writer = csv.writer(self.books_file)
        self.books_writer.writerow(self.BOOK_COLUMNS)

        try:
            self.chapters_file = open('output/chapters.csv', 'w', newline='', encoding='utf-8-sig')
        except Exception:
            # Do not leak the already-open books file if this open fails.
            self.books_file.close()
            raise
        self.chapters_writer = csv.writer(self.chapters_file)
        self.chapters_writer.writerow(self.CHAPTER_COLUMNS)

        spider.logger.info("CSV文件已准备就绪")

    def close_spider(self, spider):
        """Close both CSV files; the second closes even if the first close fails."""
        try:
            self.books_file.close()
        finally:
            self.chapters_file.close()
        spider.logger.info("CSV文件已保存")

    def process_item(self, item, spider):
        """Route *item* to the matching CSV writer and return it unchanged."""
        adapter = ItemAdapter(item)

        # Book-metadata items are identified by the presence of 'rank';
        # chapter items by the presence of 'content'.
        if 'rank' in adapter:
            self._write_book(adapter, spider)
        elif 'content' in adapter:
            self._write_chapter(adapter, spider)

        return item

    def _write_book(self, adapter, spider):
        """Append one book-metadata row to books.csv."""
        self.books_writer.writerow(
            [adapter.get(column, '') for column in self.BOOK_COLUMNS]
        )
        spider.logger.info(f"📚 书籍保存: {adapter.get('title', '')}")

    def _write_chapter(self, adapter, spider):
        """Append one chapter row (content length computed here) to chapters.csv."""
        content = adapter.get('content', '')
        self.chapters_writer.writerow([
            adapter.get('book_id', ''),
            adapter.get('book_title', ''),
            adapter.get('book_author', ''),
            adapter.get('book_category', ''),
            adapter.get('chapter_title', ''),
            adapter.get('chapter_url', ''),
            adapter.get('chapter_order', 0),
            content,
            len(content),
        ])
        spider.logger.info(f"📖 章节保存: {adapter.get('chapter_title', '')}, 字数: {len(content)}")