# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html

import logging
from itemadapter import ItemAdapter
from .models import DatabaseManager


class WeiboCommentsPipeline:
    """Scrapy item pipeline that persists scraped Weibo comments through DatabaseManager.

    Storage is best-effort: database failures are logged but never abort the
    crawl, and the item is always passed on to downstream pipelines.
    """

    # Fields every stored comment row must carry; absent ones are filled
    # with None before saving so the DB layer always sees a complete row.
    REQUIRED_FIELDS = (
        'comment_id', 'created_at', 'user_id', 'user_name',
        'user_city', 'like_counts', 'reply_counts', 'content',
    )

    def __init__(self):
        """Create the database manager; re-raise on failure so Scrapy disables the pipeline."""
        logging.info("初始化 WeiboCommentsPipeline")
        try:
            self.db_manager = DatabaseManager()
            logging.info("数据库管理器初始化成功")
        except Exception as e:
            # logging.exception keeps the traceback, which plain error() dropped.
            logging.exception(f"数据库管理器初始化失败: {e}")
            raise

    def process_item(self, item, spider):
        """Normalize a scraped item, save it to the database, and return it.

        :param item: scraped comment item (dict-like).
        :param spider: the spider that produced the item (unused).
        :returns: the original item, so later pipelines still receive it
                  even when saving fails.
        """
        try:
            item_dict = dict(item)

            # Fill any missing required field with None (setdefault only
            # touches absent keys, matching the previous membership check).
            for field in self.REQUIRED_FIELDS:
                item_dict.setdefault(field, None)

            comment_id = item_dict['comment_id']
            # Lazy %-style args skip string formatting when the level is disabled.
            logging.info("正在处理数据项: %s", comment_id)

            if self.db_manager.save_comment(item_dict):
                logging.info("数据保存成功: %s", comment_id)
            else:
                logging.error("数据保存失败: %s", comment_id)
        except Exception as e:
            # Best-effort storage: record the full traceback but keep crawling.
            logging.exception("处理数据项时出错: %s", e)
        return item

    def close_spider(self, spider):
        """Log the total number of stored comments when the spider finishes."""
        try:
            total_comments = self.db_manager.get_comment_count()
            logging.info("爬虫结束，总共收集到 %s 条评论", total_comments)
        except Exception as e:
            logging.exception("获取评论总数时出错: %s", e)
