# -*- coding: utf-8 -*-

"""
管道包
"""

import json
import os
from pathlib import Path
from typing import Dict, Set
import time
from scrapy import signals
from scrapy.exceptions import DropItem
from itemadapter import ItemAdapter
import logging
from concurrent.futures import ThreadPoolExecutor, as_completed
import threading

from fire_control_spider.items import WebPageItem, MediaItem
from fire_control_spider.utils import FileUtils, ContentProcessor


class ValidationPipeline:
    """Validation pipeline: rejects web-page items that lack mandatory fields."""

    def process_item(self, item, spider):
        """Drop a WebPageItem when 'track_id' or 'url' is missing or empty.

        Non-WebPageItem items pass through untouched.
        """
        adapter = ItemAdapter(item)

        if isinstance(item, WebPageItem):
            # Both fields are mandatory; an empty string counts as missing.
            for field in ('track_id', 'url'):
                if not adapter.get(field):
                    raise DropItem(f"Missing required field: {field} in {item}")

        return item


class DuplicatesPipeline:
    """Deduplication pipeline keyed on both track_id and url."""

    def __init__(self):
        # Identifiers already emitted during this crawl.
        self.ids_seen: Set[str] = set()
        self.urls_seen: Set[str] = set()

    def process_item(self, item, spider):
        """Drop a WebPageItem whose track_id or url has been seen before."""
        adapter = ItemAdapter(item)

        if isinstance(item, WebPageItem):
            track_id, url = adapter['track_id'], adapter['url']

            # Either key matching a previous item marks it as a duplicate.
            if track_id in self.ids_seen or url in self.urls_seen:
                raise DropItem(f"Duplicate item found: {track_id}")

            self.ids_seen.add(track_id)
            self.urls_seen.add(url)

        return item


class MediaDownloadPipeline:
    """Legacy media download pipeline — disabled.

    Downloading is handled by AsyncMediaDownloadPipeline; this class is kept
    only so existing ITEM_PIPELINES settings that reference it keep working.
    """

    def process_item(self, item, spider):
        # Intentional no-op passthrough.
        return item


class JsonlWriterPipeline:
    """Writes WebPageItem records to rotating JSONL files, one writer per site.

    Output layout: <output_dir>/<site_name>/jsonl/data_NNNN.jsonl, where
    site_name is taken from the spider's entry in the project config and
    falls back to the spider name when no entry exists.
    """

    def __init__(self, output_dir: str, max_size: int, max_records: int):
        # Root directory under which each site gets its own subtree.
        self.output_dir = Path(output_dir)
        # Rotation thresholds forwarded to each JsonlWriter.
        self.max_size = max_size
        self.max_records = max_records
        # One lazily-created writer per site.
        self.writers: Dict[str, 'JsonlWriter'] = {}
        # Cache of spider name -> site name, so the project config is loaded
        # at most once per spider instead of once per item.
        self._site_names: Dict[str, str] = {}

    @classmethod
    def from_crawler(cls, crawler):
        """Build the pipeline from crawler settings (with safe defaults)."""
        return cls(
            output_dir=crawler.settings.get("OUTPUT_DIR", "outputs"),
            max_size=crawler.settings.get("JSONL_MAX_SIZE", 1024*1024*1024),
            max_records=crawler.settings.get("JSONL_MAX_RECORDS", 50000)
        )

    def open_spider(self, spider):
        """Nothing to initialise up front; writers are created on demand."""
        pass

    def close_spider(self, spider):
        """Close every open writer so buffered data reaches disk."""
        for writer in self.writers.values():
            writer.close()

    def _resolve_site_name(self, spider_name: str) -> str:
        """Map a spider name to its configured site_name, caching the result.

        Falls back to the spider name itself when the config has no
        matching entry.
        """
        if spider_name not in self._site_names:
            # Local import to avoid a circular import at module load time.
            from fire_control_spider.config import create_default_config
            config = create_default_config()

            site_name = spider_name  # fallback when no config entry matches
            for spider_config in config['spiders']:
                if spider_config['name'] == spider_name:
                    site_name = spider_config['site_name']
                    break
            self._site_names[spider_name] = site_name
        return self._site_names[spider_name]

    def _get_writer(self, site_name: str) -> 'JsonlWriter':
        """Return the writer for a site, creating its directory and writer on first use."""
        if site_name not in self.writers:
            site_dir = self.output_dir / site_name / "jsonl"
            site_dir.mkdir(parents=True, exist_ok=True)
            self.writers[site_name] = JsonlWriter(
                site_dir, self.max_size, self.max_records
            )
        return self.writers[site_name]

    def process_item(self, item, spider):
        """Serialize a WebPageItem to one JSONL record in its site's file."""
        if isinstance(item, WebPageItem):
            adapter = ItemAdapter(item)
            site_name = self._resolve_site_name(spider.name)

            data = {
                'track_id': adapter['track_id'],
                'url': adapter['url'],
                'category': adapter.get('category', ''),
                'publish_time': adapter.get('publish_time', ''),
                'title': adapter.get('title', ''),
                'main_body': adapter.get('main_body', ''),
                # For file references, keep only name and path.
                'main_file': [
                    {'name': f.get('name', ''), 'path': f.get('path', '')}
                    for f in adapter.get('main_files', [])
                ],
                'attachment_file': [
                    {'name': f.get('name', ''), 'path': f.get('path', '')}
                    for f in adapter.get('attachment_files', [])
                ],
                'remark': adapter.get('remark', {})
            }

            self._get_writer(site_name).write_record(data)

        return item


class JsonlWriter:
    """Appends JSON records to size- and count-limited rotating JSONL files.

    A fresh file (data_0001.jsonl, data_0002.jsonl, ...) is started whenever
    the current one would exceed ``max_size`` bytes or already holds
    ``max_records`` records.
    """

    def __init__(self, output_dir: Path, max_size: int, max_records: int):
        self.output_dir = output_dir
        self.max_size = max_size
        self.max_records = max_records
        self.current_file = None    # open binary handle, or None before first write
        self.current_size = 0       # bytes written to the current file
        self.current_records = 0    # records written to the current file
        self.file_index = 1         # index used for the next file name

    def write_record(self, data: dict):
        """Serialize *data* as one compact JSONL line and append it."""
        line = json.dumps(data, ensure_ascii=False, separators=(',', ':'))
        payload = line.encode('utf-8') + b'\n'

        # Start a new file when none is open yet, when this record would
        # push the file past the size cap, or when the record cap is hit.
        rotate = (
            self.current_file is None
            or self.current_size + len(payload) > self.max_size
            or self.current_records >= self.max_records
        )
        if rotate:
            self._rotate_file()

        self.current_file.write(payload)
        self.current_size += len(payload)
        self.current_records += 1

    def _rotate_file(self):
        """Close the active file (if any) and open the next numbered one."""
        if self.current_file:
            self.current_file.close()

        filepath = self.output_dir / f"data_{self.file_index:04d}.jsonl"
        self.current_file = open(filepath, 'wb')
        self.current_size = 0
        self.current_records = 0
        self.file_index += 1

    def close(self):
        """Close the active file, if one is open."""
        if self.current_file:
            self.current_file.close()
            self.current_file = None


# Import the async download pipelines from async_download.py
from .async_download import AsyncMediaDownloadPipeline, AsyncFileDownloader

__all__ = [
    'ValidationPipeline',
    'DuplicatesPipeline', 
    'MediaDownloadPipeline',
    'JsonlWriterPipeline',
    'JsonlWriter',
    'AsyncMediaDownloadPipeline',
    'AsyncFileDownloader'
] 