import json
from pathlib import Path
import logging
from datetime import datetime


class BatchSavePipeline:
    """
    Item pipeline that saves scraped data to JSON files in batches.

    Every ``batch_size`` (default 1000) items are flushed to their own
    JSON file, keeping individual files small and easy to post-process.
    An ``index.json`` file in the data directory records the running
    totals, so an interrupted crawl can resume with correct counters.
    """

    def __init__(self):
        # Buffer holding the current, not-yet-persisted batch.
        self.items = []
        # Number of batch files written so far (used in file names).
        self.file_count = 0
        # Cumulative number of items processed across all batches.
        self.item_count = 0
        # Flush threshold: items accumulated before writing a file.
        self.batch_size = 1000
        self.logger = logging.getLogger(__name__)
        # Set lazily (open_spider / process_item); initialized to None so
        # save_items() does not hit an AttributeError if called first.
        self.spider = None

        # Ensure the output directory exists.
        self.data_dir = Path('data')
        self.data_dir.mkdir(exist_ok=True)

        # Resume counters from a previous run's index file, if present.
        self.index_file = self.data_dir / 'index.json'
        if self.index_file.exists():
            try:
                with open(self.index_file, 'r', encoding='utf-8') as f:
                    index_data = json.load(f)
                    self.file_count = index_data.get('total_files', 0)
                    self.item_count = index_data.get('total_items', 0)
                    self.logger.info(f"从索引加载现有数据信息: {self.item_count}个项目, {self.file_count}个文件")
            except Exception as e:
                # Best-effort resume: a corrupt/missing index just means
                # counters restart from zero; the crawl itself proceeds.
                self.logger.error(f"加载索引文件失败: {e}")

    def open_spider(self, spider):
        """Called when the spider starts; remember it for log prefixes."""
        self.spider = spider
        self.logger.info("启动爬虫数据存储管道")

    def process_item(self, item, spider):
        """
        Buffer one scraped item and flush the batch when full.

        Args:
            item: The scraped item (converted to a plain dict for JSON).
            spider: The spider that produced the item.

        Returns:
            The item unchanged, so downstream pipelines still see it.
        """
        if not self.spider:
            # Fallback in case open_spider was never called.
            self.spider = spider

        self.items.append(dict(item))
        self.item_count += 1

        # Flush a file once the batch threshold is reached.
        if len(self.items) >= self.batch_size:
            self.save_items()

        return item

    def save_items(self):
        """
        Write the buffered batch to a new JSON file and update the index.

        No-op when the buffer is empty. On success the buffer is cleared,
        ``file_count`` is incremented, and the index file is refreshed.
        Errors are logged rather than raised, so the crawl keeps running.
        """
        if not self.items:
            return

        try:
            # Timestamped file name (plus the running counter) so files
            # from resumed or concurrent runs never overwrite each other.
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = self.data_dir / f'nankai_pages_{self.file_count}_{timestamp}.json'

            with open(filename, 'w', encoding='utf-8') as f:
                json.dump(self.items, f, ensure_ascii=False)

            spider_name = getattr(self.spider, 'name', 'spider') if self.spider else 'unknown'
            # Fix: log the actual output path (was a literal placeholder).
            self.logger.info(f"[{spider_name}] 已保存{len(self.items)}个页面到 {filename}")

            # Reset the buffer for the next batch.
            self.items = []
            self.file_count += 1
            # Keep the index in sync after every successful flush.
            self._update_index()

        except Exception as e:
            self.logger.error(f"保存数据文件失败: {e}")

    def close_spider(self, spider):
        """Called when the spider closes; flush leftovers and finalize."""
        self.spider = spider
        # Persist any partial batch remaining in the buffer.
        if self.items:
            self.save_items()

        # Mark the index as complete for downstream consumers.
        self._update_index(final=True)

        self.logger.info(f"[{spider.name}] 爬取完成，共保存 {self.item_count} 个页面，分为 {self.file_count} 个文件")

    def _update_index(self, final=False):
        """
        Rewrite ``index.json`` with current totals and the file listing.

        Args:
            final: True when the crawl has finished; recorded so readers
                can tell a complete dataset from an in-progress one.
        """
        try:
            # Enumerate every batch file actually on disk, sorted by name.
            json_files = sorted([f.name for f in self.data_dir.glob('nankai_pages_*.json')])

            index_data = {
                'total_items': self.item_count,
                'total_files': self.file_count,
                'batch_size': self.batch_size,
                'last_updated': datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                'files': json_files,
                'is_complete': final
            }

            with open(self.index_file, 'w', encoding='utf-8') as f:
                json.dump(index_data, f, ensure_ascii=False, indent=2)

        except Exception as e:
            self.logger.error(f"更新索引文件失败: {e}")