from kafka import KafkaProducer, KafkaConsumer
from kafka.admin import KafkaAdminClient, NewTopic
from kafka.errors import KafkaError
import json
from typing import Dict, Any, List
import threading
import time
from datetime import datetime
from pathlib import Path
import logging
from concurrent.futures import ThreadPoolExecutor
import signal
import sys
from job_agent_system import JobAgentSystem
from knowledge_graph import JobKnowledgeGraph

# Configure process-wide logging once at import time: timestamped, leveled
# records shared by every component in this module.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
# Module-level logger used by the pipeline class and the script entry point.
logger = logging.getLogger("KafkaPipeline")

class JobDataPipeline:
    """Real-time job-posting data pipeline.

    Consumes job postings from a Kafka topic, runs them through the
    multi-agent analysis system, stores them in the knowledge graph, and
    writes a per-job JSON processing log. A background monitor thread
    periodically reports consumer lag and producer metrics.
    """

    def __init__(self, config: Dict[str, Any]):
        """Create the pipeline and connect to Kafka.

        Args:
            config: must contain "bootstrap_servers", "topic" and
                "group_id"; may contain "log_dir" (defaults to
                "b/data/logs").

        Raises:
            KafkaError: if the Kafka clients cannot be created.
        """
        self.config = config
        self.running = False
        self.producer = None
        self.consumer = None
        self.admin_client = None
        self.agent_system = JobAgentSystem()
        self.kg_client = JobKnowledgeGraph()
        self.executor = ThreadPoolExecutor(max_workers=4)
        # KafkaConsumer is NOT thread-safe: the consumer thread (poll) and
        # the monitor thread (lag inspection) must never touch it
        # concurrently, so all consumer access is serialized by this lock.
        self._consumer_lock = threading.Lock()
        # Non-blocking acquire makes _shutdown idempotent (it can be reached
        # both via a signal and via the KeyboardInterrupt handler).
        self._shutdown_once = threading.Lock()

        # Establish Kafka connections eagerly so misconfiguration fails fast.
        self._init_kafka()

    def _init_kafka(self):
        """Create admin client, topic (if missing), producer and consumer."""
        try:
            # Admin client: used only to ensure the topic exists.
            self.admin_client = KafkaAdminClient(
                bootstrap_servers=self.config["bootstrap_servers"]
            )

            # Create the topic if it does not exist yet.
            existing_topics = self.admin_client.list_topics()
            if self.config["topic"] not in existing_topics:
                self.admin_client.create_topics([
                    NewTopic(
                        name=self.config["topic"],
                        num_partitions=3,
                        replication_factor=1
                    )
                ])

            # Producer: durable sends (acks='all') with bounded retries.
            self.producer = KafkaProducer(
                bootstrap_servers=self.config["bootstrap_servers"],
                value_serializer=lambda v: json.dumps(v).encode('utf-8'),
                acks='all',
                retries=3
            )

            # Consumer: starts from the earliest unread offset, auto-commits.
            self.consumer = KafkaConsumer(
                self.config["topic"],
                bootstrap_servers=self.config["bootstrap_servers"],
                group_id=self.config["group_id"],
                auto_offset_reset='earliest',
                enable_auto_commit=True,
                value_deserializer=lambda x: json.loads(x.decode('utf-8'))
            )

        except KafkaError as e:
            logger.error(f"Kafka初始化失败: {str(e)}")
            raise

    def start(self):
        """Start the consumer and monitor threads and install signal handlers.

        Must be called from the main thread (signal.signal requirement).
        Idempotent: a second call while running only logs a warning.
        """
        if self.running:
            logger.warning("管道已经在运行")
            return

        self.running = True
        logger.info("启动岗位数据管道...")

        # Consumer thread: pulls messages and processes them.
        consumer_thread = threading.Thread(
            target=self._consume_messages,
            daemon=True
        )
        consumer_thread.start()

        # Monitor thread: periodic health/lag reporting.
        monitor_thread = threading.Thread(
            target=self._monitor_pipeline,
            daemon=True
        )
        monitor_thread.start()

        # Graceful shutdown on Ctrl-C or SIGTERM.
        signal.signal(signal.SIGINT, self._shutdown)
        signal.signal(signal.SIGTERM, self._shutdown)

    def _consume_messages(self):
        """Consume and process messages until the pipeline stops.

        Uses poll() with a timeout instead of blocking iteration so that
        self.running is re-checked at least once per second; the old
        `for message in self.consumer` form could block indefinitely on an
        idle topic and never observe shutdown.
        """
        logger.info("开始消费消息...")
        try:
            while self.running:
                with self._consumer_lock:
                    # Returns {TopicPartition: [records]}; empty dict on timeout.
                    batches = self.consumer.poll(timeout_ms=1000)
                for records in batches.values():
                    for message in records:
                        if not self.running:
                            return
                        try:
                            self._process_job_data(message.value)
                        except Exception as e:
                            # One bad message must not kill the consume loop.
                            logger.error(f"处理消息失败: {str(e)}", exc_info=True)
        except Exception as e:
            logger.error(f"消费者错误: {str(e)}", exc_info=True)
        # The consumer is closed exactly once, in _shutdown(), to avoid the
        # double-close that occurred when both this thread and _shutdown
        # closed it.

    def _process_job_data(self, job_data: Dict[str, Any]):
        """Analyze one job posting, store it, and log the result.

        Args:
            job_data: deserialized message payload; expected to carry a
                "job_id" key (missing ids are tolerated and logged as None).
        """
        job_id = job_data.get("job_id")
        logger.info(f"处理岗位数据: {job_id}")

        # Multi-agent analysis of the posting.
        analysis = self.agent_system.analyze_job_posting(job_data)

        # Persist into the knowledge graph.
        self.kg_client.add_job_posting(job_data)

        # Record the processing outcome to disk.
        self._log_processing_result(job_id, analysis)

    def _log_processing_result(self, job_id: str, analysis: Dict[str, Any]):
        """Write the analysis result for one job to <log_dir>/job_<id>.json."""
        log_dir = Path(self.config.get("log_dir", "b/data/logs"))
        log_dir.mkdir(parents=True, exist_ok=True)

        log_file = log_dir / f"job_{job_id}.json"
        # Explicit UTF-8 and ensure_ascii=False: payloads contain CJK text
        # and must not depend on the platform default encoding.
        with open(log_file, "w", encoding="utf-8") as f:
            json.dump({
                "job_id": job_id,
                "processed_at": datetime.now().isoformat(),
                "analysis": analysis
            }, f, indent=2, ensure_ascii=False)

        logger.info(f"处理结果已记录到 {log_file}")

    def produce_job_data(self, job_data: Dict[str, Any]):
        """Asynchronously publish one job posting to the configured topic.

        Raises:
            RuntimeError: if the pipeline has not been started.
            KeyError: if job_data has no "job_id" (checked before sending,
                so an untrackable record is never published).
            KafkaError: if the send cannot be enqueued.
        """
        if not self.running:
            raise RuntimeError("管道未运行")

        # Resolve the id up front: previously a missing "job_id" raised only
        # AFTER the record had already been sent.
        job_id = job_data["job_id"]
        try:
            future = self.producer.send(
                topic=self.config["topic"],
                value=job_data
            )

            # Delivery outcome is handled asynchronously; kafka-python
            # partially applies the extra arg, so callbacks receive
            # (job_id, metadata) / (job_id, exception).
            future.add_callback(
                self._on_send_success,
                job_id
            ).add_errback(
                self._on_send_error,
                job_id
            )

        except KafkaError as e:
            logger.error(f"发送消息失败: {str(e)}")
            raise

    def _on_send_success(self, job_id: str, metadata):
        """Delivery-success callback: log the destination partition."""
        logger.info(f"岗位数据 {job_id} 已发送到分区 {metadata.partition}")

    def _on_send_error(self, job_id: str, exc):
        """Delivery-failure callback: log the exception."""
        logger.error(f"发送岗位数据 {job_id} 失败: {str(exc)}")

    def _monitor_pipeline(self):
        """Report consumer lag and producer metrics every 10 seconds."""
        while self.running:
            try:
                # Consumer health: lag per assigned partition.
                consumer_lag = self._get_consumer_lag()
                logger.info(f"消费者延迟: {consumer_lag} 条消息")

                # Producer health: client metrics snapshot.
                producer_metrics = self.producer.metrics()
                logger.debug(f"生产者指标: {producer_metrics}")

            except Exception as e:
                # Monitoring must never take the pipeline down.
                logger.error(f"监控错误: {str(e)}")

            time.sleep(10)

    def _get_consumer_lag(self) -> Dict[str, int]:
        """Return {partition: messages behind} for every assigned partition.

        Holds the consumer lock for the whole scan: KafkaConsumer is not
        thread-safe and the consumer thread polls concurrently.
        """
        consumer_lag = {}
        with self._consumer_lock:
            for tp in self.consumer.assignment():
                end_offset = self.consumer.end_offsets([tp])[tp]
                current_offset = self.consumer.position(tp)
                consumer_lag[str(tp)] = end_offset - current_offset
        return consumer_lag

    def _shutdown(self, signum=None, frame=None):
        """Stop the pipeline, close all Kafka clients, and exit the process.

        Safe to call more than once (signal handler plus KeyboardInterrupt
        path): only the first call performs the teardown.
        """
        if not self._shutdown_once.acquire(blocking=False):
            return

        logger.info("正在关闭管道...")
        self.running = False

        try:
            if self.producer:
                # Flush any buffered records before closing.
                self.producer.flush()
                self.producer.close()

            if self.consumer:
                # The consume loop holds the lock for at most ~1s per poll.
                with self._consumer_lock:
                    self.consumer.close()

            if self.admin_client:
                self.admin_client.close()

            self.executor.shutdown(wait=False)

        except Exception as e:
            logger.error(f"关闭错误: {str(e)}")

        logger.info("管道已关闭")
        sys.exit(0)

def main() -> None:
    """Demo entry point: start the pipeline and publish one sample posting.

    Runs until interrupted; SIGINT/SIGTERM (or Ctrl-C) trigger the
    pipeline's shutdown sequence, which exits the process.
    """
    # Example configuration
    config = {
        "bootstrap_servers": "localhost:9092",
        "topic": "job_postings",
        "group_id": "job_processing_group",
        "log_dir": "b/data/logs"
    }

    # Create and start the pipeline
    pipeline = JobDataPipeline(config)
    pipeline.start()

    # Sample payload
    sample_job = {
        "job_id": "job_123",
        "title": "Senior Data Scientist",
        "company": "DataTech Inc.",
        "description": "Develop machine learning models for predictive analytics.",
        "required_skills": ["Python", "Machine Learning", "SQL", "Statistics"],
        "industry": "IT",
        "posted_at": datetime.now().isoformat()
    }

    # Publish the sample record
    try:
        pipeline.produce_job_data(sample_job)

        # Keep the main thread alive; worker threads are daemons and the
        # signal handler terminates the process.
        while True:
            time.sleep(1)

    except KeyboardInterrupt:
        pipeline._shutdown()


if __name__ == "__main__":
    main()