"""数据处理管道，负责数据的采集、清洗、特征提取和存储"""
import json
import time
from typing import Dict, List, Any, Optional, Callable

from confluent_kafka import KafkaError

from fin_senti_entity_platform.utils.logger import get_logger
from fin_senti_entity_platform.utils.config_loader import get_config
from fin_senti_entity_platform.data_collection.storage.kafka_client import KafkaClient
from fin_senti_entity_platform.data_collection.storage.hdfs_client import HDFSClient
from fin_senti_entity_platform.utils.exceptions import DataException

logger = get_logger(__name__)

class DataPipeline:
    """Data processing pipeline.

    Consumes JSON records from a Kafka source topic, applies a caller-supplied
    processor function in batches, and optionally republishes the results to a
    sink topic. Cumulative run statistics are kept in ``self.metrics``.
    """

    # Hard upper bound on the duration of a single process_data() run (seconds).
    MAX_RUN_SECONDS = 3600

    def __init__(self):
        self.config = get_config()
        self.kafka_client = KafkaClient()
        self.hdfs_client = HDFSClient()
        # Counters accumulated across every process_data() invocation.
        self.metrics = {
            'total_processed': 0,
            'success_count': 0,
            'error_count': 0,
            'processing_time': 0.0,
            'last_processed_time': 0.0
        }

    def process_data(self,
                     source_topic: str,
                     processor: Callable[[Dict[str, Any]], Dict[str, Any]],
                     sink_topic: Optional[str] = None,
                     batch_size: int = 100,
                     max_batch_time: int = 30) -> Dict[str, int]:
        """Consume from *source_topic* and process messages in batches.

        Args:
            source_topic: Source Kafka topic.
            processor: Function applied to each decoded message dict.
            sink_topic: Optional target Kafka topic for processed results.
            batch_size: Maximum number of messages per batch.
            max_batch_time: Maximum seconds a partially filled batch may wait
                before being flushed.

        Returns:
            Dict with 'processed', 'success' and 'error' counts for this run.
        """
        start_time = time.time()
        processed_count = 0
        success_count = 0
        error_count = 0
        batch: List[Dict[str, Any]] = []
        consumer = None  # set before use; guarded so finally can clean up safely

        try:
            consumer = self.kafka_client.create_consumer(
                group_id=f"processor_{source_topic}",
                topics=[source_topic]
            )
            batch_start_time = time.time()

            while True:
                # Flush a partially filled batch once it has waited too long.
                if batch and (time.time() - batch_start_time > max_batch_time):
                    flushed = self._flush_batch(batch, processor, sink_topic, consumer)
                    processed_count += flushed['processed']
                    success_count += flushed['success']
                    error_count += flushed['error']
                    batch = []
                    batch_start_time = time.time()

                msg = consumer.poll(1.0)  # 1-second poll timeout

                if msg is None:
                    continue

                if msg.error():
                    if msg.error().code() == KafkaError._PARTITION_EOF:
                        # End of a partition is not an error; keep polling.
                        continue
                    logger.error(f"消费消息错误: {msg.error()}")
                    error_count += 1
                    continue

                try:
                    data = json.loads(msg.value().decode('utf-8'))
                except (json.JSONDecodeError, UnicodeDecodeError):
                    # Guard against non-UTF-8 payloads as well as bad JSON;
                    # either would otherwise abort the whole consume loop.
                    logger.error(f"消息解析错误: {msg.value()}")
                    error_count += 1
                    continue

                batch.append(data)

                # Flush as soon as the batch reaches its maximum size.
                if len(batch) >= batch_size:
                    flushed = self._flush_batch(batch, processor, sink_topic, consumer)
                    processed_count += flushed['processed']
                    success_count += flushed['success']
                    error_count += flushed['error']
                    batch = []
                    batch_start_time = time.time()

                # Stop the run once the hard time limit is exceeded.
                if time.time() - start_time > self.MAX_RUN_SECONDS:
                    break
        except Exception as e:
            logger.error(f"数据处理管道异常: {str(e)}")
            error_count += 1
        finally:
            # Flush whatever is still buffered, then release the consumer.
            if batch:
                flushed = self._flush_batch(batch, processor, sink_topic, consumer)
                processed_count += flushed['processed']
                success_count += flushed['success']
                error_count += flushed['error']
            if consumer is not None:
                try:
                    consumer.close()
                except Exception as e:
                    logger.error(f"关闭消费者失败: {str(e)}")

            # Fold this run into the pipeline-wide metrics.
            self.metrics['total_processed'] += processed_count
            self.metrics['success_count'] += success_count
            self.metrics['error_count'] += error_count
            self.metrics['processing_time'] += time.time() - start_time
            self.metrics['last_processed_time'] = time.time()

            logger.info(f"数据处理完成: 总处理 {processed_count} 条, 成功 {success_count} 条, 失败 {error_count} 条")

        return {
            'processed': processed_count,
            'success': success_count,
            'error': error_count
        }

    def _flush_batch(self,
                     batch: List[Dict[str, Any]],
                     processor: Callable[[Dict[str, Any]], Dict[str, Any]],
                     sink_topic: Optional[str],
                     consumer: Any = None) -> Dict[str, int]:
        """Process *batch*, then commit offsets on *consumer* (if given).

        Committing only AFTER the batch has been handled gives at-least-once
        delivery: a crash before the commit re-delivers the buffered messages
        instead of silently dropping them.

        Returns:
            The count dict produced by :meth:`_process_batch`.
        """
        result = self._process_batch(batch, processor, sink_topic)
        if consumer is not None:
            try:
                consumer.commit()
            except Exception as e:
                logger.error(f"提交偏移量失败: {str(e)}")
        return result

    def _process_batch(self,
                       batch: List[Dict[str, Any]],
                       processor: Callable[[Dict[str, Any]], Dict[str, Any]],
                       sink_topic: Optional[str] = None) -> Dict[str, int]:
        """Apply *processor* to every record and optionally publish results.

        Args:
            batch: Decoded message dicts to process.
            processor: Function applied to each record; failures are logged
                and counted, never re-raised.
            sink_topic: Optional Kafka topic to publish processed results to.

        Returns:
            Dict with 'processed', 'success' and 'error' counts. A record that
            processes successfully but fails to publish is reclassified from
            success to error.
        """
        processed_count = 0
        success_count = 0
        error_count = 0
        results = []

        for data in batch:
            processed_count += 1
            try:
                results.append(processor(data))
                success_count += 1
            except Exception as e:
                logger.error(f"数据处理失败: {str(e)}, 数据: {data}")
                error_count += 1

        # Publish processed records to the sink topic, if one was given.
        if sink_topic and results:
            for result in results:
                try:
                    self.kafka_client.send_message(sink_topic, result)
                except Exception as e:
                    # Processed but never delivered: move it to the error bucket.
                    logger.error(f"写入Kafka失败: {str(e)}, 数据: {result}")
                    error_count += 1
                    success_count -= 1

        return {
            'processed': processed_count,
            'success': success_count,
            'error': error_count
        }

    def get_metrics(self) -> Dict[str, Any]:
        """Return a snapshot of the cumulative pipeline metrics.

        A shallow copy is returned so callers cannot mutate internal state.
        """
        return dict(self.metrics)