"""
处理爬虫抓取的数据的Pipeline
"""
import json
import logging
import os
import time

from itemadapter import ItemAdapter
from scrapy.exceptions import DropItem


class DefaultValuesPipeline:
    """Pipeline that fills in default values for common item fields."""

    def process_item(self, item, spider):
        """Set crawl_time and spider_name when the spider left them unset."""
        adapter = ItemAdapter(item)

        # Field -> factory producing its default; factories are only invoked
        # when the field is actually missing, so time.time() reflects now.
        defaults = {
            'crawl_time': lambda: int(time.time()),
            'spider_name': lambda: spider.name,
        }
        for field, make_default in defaults.items():
            if field not in adapter:
                adapter[field] = make_default()

        return item


class DuplicatesPipeline:
    """Pipeline that drops items whose URL has already been seen in this crawl."""

    def __init__(self):
        # URLs of every item accepted so far; lives for the crawl's lifetime.
        self.ids_seen = set()

    def process_item(self, item, spider):
        """Drop the item when its 'url' was processed before; otherwise record it."""
        adapter = ItemAdapter(item)

        # Items without a URL cannot be deduplicated; pass them through as-is.
        if 'url' not in adapter:
            return item

        url = adapter['url']
        if url in self.ids_seen:
            raise DropItem(f"重复的item: {url}")
        self.ids_seen.add(url)
        return item


class JsonWriterPipeline:
    """Pipeline that streams items into a JSON-array file, one file per spider run.

    The file is named ``<spider>_<unix timestamp>.json`` and written under
    ``output_dir`` (setting ``JSON_OUTPUT_DIR``, default ``data/output``).
    Items are written incrementally, so memory use stays constant.
    """

    def __init__(self, output_dir):
        # Directory the JSON file goes to; created on demand in open_spider.
        self.output_dir = output_dir
        # File handle / comma bookkeeping. Initialized here (not only in
        # open_spider) so close_spider is safe even if open_spider never ran
        # or failed part-way. (Removed the previously unused `self.items`.)
        self.file = None
        self.first_item = True

    @classmethod
    def from_crawler(cls, crawler):
        """Build the pipeline from crawler settings (JSON_OUTPUT_DIR)."""
        output_dir = crawler.settings.get('JSON_OUTPUT_DIR', 'data/output')
        return cls(output_dir)

    def open_spider(self, spider):
        """Create the output directory and open the per-run JSON file."""
        os.makedirs(self.output_dir, exist_ok=True)
        path = os.path.join(
            self.output_dir, f"{spider.name}_{int(time.time())}.json"
        )
        self.file = open(path, 'w', encoding='utf-8')
        self.file.write('[\n')  # opening bracket of the JSON array
        self.first_item = True

    def close_spider(self, spider):
        """Terminate the JSON array and release the file handle."""
        if self.file is not None:  # open_spider may have failed or never run
            self.file.write('\n]')
            self.file.close()
            self.file = None

    def process_item(self, item, spider):
        """Append the item as one JSON object, comma-separating successive items."""
        line = json.dumps(ItemAdapter(item).asdict(), ensure_ascii=False)
        if self.first_item:
            self.first_item = False
        else:
            self.file.write(',\n')
        self.file.write(line)
        return item


class MongoPipeline:
    """Pipeline that persists items into per-item-type MongoDB collections."""

    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db
        # Both stay None until open_spider establishes a connection.
        self.client = None
        self.db = None

    @classmethod
    def from_crawler(cls, crawler):
        """Read connection settings (MONGO_URI / MONGO_DATABASE) from the crawler."""
        uri = crawler.settings.get('MONGO_URI', 'mongodb://localhost:27017')
        database = crawler.settings.get('MONGO_DATABASE', 'myscraper')
        return cls(mongo_uri=uri, mongo_db=database)

    def open_spider(self, spider):
        """Connect to MongoDB; any failure disables the pipeline (client stays None)."""
        try:
            # Imported lazily so the project still runs without pymongo installed.
            import pymongo
            self.client = pymongo.MongoClient(self.mongo_uri)
            self.db = self.client[self.mongo_db]
        except ImportError:
            logging.error("未安装pymongo，无法使用MongoPipeline")
            self.client = None
        except Exception as e:
            logging.error(f"MongoDB连接失败: {e}")
            self.client = None
        else:
            logging.info("MongoDB连接成功")

    def close_spider(self, spider):
        """Release the MongoDB connection if one was established."""
        if self.client:
            self.client.close()

    def process_item(self, item, spider):
        """Insert the item into a collection named after its class; no-op when disconnected."""
        if not self.client:
            return item
        # e.g. ArticleItem -> "articles": lowercase the class name, 'item' -> 's'.
        collection_name = type(item).__name__.lower().replace('item', 's')
        self.db[collection_name].insert_one(ItemAdapter(item).asdict())
        logging.debug(f"Item已保存到MongoDB集合: {collection_name}")
        return item
