#!/usr/bin/env python3
"""
Flink Agents 本地模式入口
使用本地执行环境 + Kinesis 轮询 + 批处理
"""
import os
import json
import logging
import time
import boto3
from typing import Dict, Any, List

# Flink Agents imports
from flink_agents.api.execution_environment import AgentsExecutionEnvironment

# Import the shared Agent implementation
from agents.behavior_agent import BehaviorAnalysisAgent

# Logging configuration: INFO level with timestamp/logger-name/level prefix,
# applied process-wide so library loggers inherit it too.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)


def write_to_postgres(results: List[Dict[str, Any]]) -> None:
    """Batch-insert analysis results into the PostgreSQL ``user_behaviors`` table.

    Connection parameters come from the ``POSTGRES_*`` environment variables
    (with docker-compose-friendly defaults). Failures are logged and swallowed
    on purpose: persistence is best-effort and must not kill the polling loop.

    Args:
        results: List of result dicts, each containing the keys
            ``user_id``, ``url``, ``behavior_type``, ``behavior_description``,
            ``confidence`` and ``analysis_method``.
    """
    if not results:
        return

    try:
        # Imported lazily so the module stays importable without psycopg2.
        import psycopg2

        conn = psycopg2.connect(
            host=os.getenv('POSTGRES_HOST', 'postgres'),
            port=int(os.getenv('POSTGRES_PORT', 5432)),
            database=os.getenv('POSTGRES_DB', 'behavior_analysis'),
            user=os.getenv('POSTGRES_USER', 'postgres'),
            password=os.getenv('POSTGRES_PASSWORD', 'postgres')
        )
        try:
            insert_query = """
                INSERT INTO user_behaviors (
                    user_id, url, behavior_type, behavior_description,
                    confidence, analysis_method, created_at
                ) VALUES (%s, %s, %s, %s, %s, %s, NOW())
            """
            # executemany in one cursor: single round of statement handling,
            # and the cursor context manager guarantees it is closed.
            with conn.cursor() as cursor:
                cursor.executemany(insert_query, [
                    (
                        result['user_id'],
                        result['url'],
                        result['behavior_type'],
                        result['behavior_description'],
                        result['confidence'],
                        result['analysis_method'],
                    )
                    for result in results
                ])
            conn.commit()
        finally:
            # Always release the connection, even if an insert raised —
            # the original code leaked it on any execute() failure.
            conn.close()

        logger.info(f"✅ 写入 PostgreSQL 成功：{len(results)} 条记录")

    except Exception as e:
        logger.error(f"❌ 写入 PostgreSQL 失败: {e}")
        import traceback
        traceback.print_exc()


def main():
    """主函数 - 本地模式"""
    logger.info("============================================================")
    logger.info("Flink Agents - 本地模式 (LocalExecutionEnvironment)")
    logger.info("============================================================")
    logger.info(f"Kinesis Stream: {os.getenv('KINESIS_STREAM_NAME', 'click-events-stream')}")
    logger.info(f"PostgreSQL: {os.getenv('POSTGRES_HOST', 'postgres')}:{os.getenv('POSTGRES_PORT', 5432)}")
    logger.info(f"LLM: {os.getenv('LLM_PROVIDER', 'deepseek')} - {os.getenv('LLM_MODEL', 'deepseek-chat')}")
    logger.info("============================================================")
    
    logger.info("✅ Prompt 模板已加载")
    
    # 初始化 Kinesis 客户端
    kinesis = boto3.client(
        'kinesis',
        endpoint_url=os.getenv('AWS_ENDPOINT', 'http://localstack:4566'),
        region_name=os.getenv('AWS_REGION', 'us-east-1'),
        aws_access_key_id='test',
        aws_secret_access_key='test'
    )
    logger.info("✅ Kinesis 客户端已初始化")
    
    # 获取 Shard Iterator
    stream_name = os.getenv('KINESIS_STREAM_NAME', 'click-events-stream')
    try:
        response = kinesis.describe_stream(StreamName=stream_name)
        shard_id = response['StreamDescription']['Shards'][0]['ShardId']
        
        shard_iterator_response = kinesis.get_shard_iterator(
            StreamName=stream_name,
            ShardId=shard_id,
            ShardIteratorType='LATEST'
        )
        shard_iterator = shard_iterator_response['ShardIterator']
        logger.info(f"✅ Shard Iterator 已获取: {shard_id}")
    except Exception as e:
        logger.error(f"❌ 获取 Shard Iterator 失败: {e}")
        raise
    
    # 持续轮询 Kinesis
    logger.info("🔄 开始轮询 Kinesis Stream...")
    
    # 批处理配置
    batch_size = 10          # 批次大小
    batch_timeout = 5        # 批次超时（秒）
    events_buffer = []       # 事件缓冲区
    last_flush_time = time.time()  # 上次刷新时间
    
    while True:
        try:
            # 从 Kinesis 读取记录
            response = kinesis.get_records(ShardIterator=shard_iterator, Limit=batch_size)
            records = response['Records']
            shard_iterator = response['NextShardIterator']
            
            if records:
                logger.info(f"📥 收到 {len(records)} 条记录")
                
                # 解析记录并添加到缓冲区
                for record in records:
                    data = json.loads(record['Data'].decode('utf-8'))
                    events_buffer.append(data)
            
            # 判断是否需要处理批次
            current_time = time.time()
            should_flush = (
                len(events_buffer) >= batch_size or                    # 达到批次大小
                (len(events_buffer) > 0 and                            # 有数据且超时
                 current_time - last_flush_time >= batch_timeout)
            )
            
            if should_flush:
                logger.info(f"🔥 处理 {len(events_buffer)} 条事件（触发条件: {'数量' if len(events_buffer) >= batch_size else '超时'}）...")
                
                # ====== 使用 Flink Agents 本地模式处理事件 ======
                # 创建本地执行环境（不传 env 参数）
                env = AgentsExecutionEnvironment.get_execution_environment()
                
                # 准备输入（flink-agents 要求包装成 {'value': data} 格式）
                input_list = [{'value': event_data} for event_data in events_buffer]
                
                # 创建 Agent 并应用处理
                agent = BehaviorAnalysisAgent()
                
                # 应用 Agent 处理（to_list() 返回结果列表）
                output_list = env.from_list(input_list).apply(agent).to_list()
                
                # 执行
                env.execute()
                
                # 提取结果（flink-agents 返回格式：[{UUID: result}]）
                results = []
                for item in output_list:
                    # 可能的格式: {'key': result} 或 直接是 result
                    if isinstance(item, dict):
                        if 'user_id' in item:
                            # 直接是结果
                            results.append(item)
                        else:
                            # 可能是包装格式，提取 value
                            for key, value in item.items():
                                if isinstance(value, dict) and 'user_id' in value:
                                    results.append(value)
                                    break
                
                for result in results:
                    logger.info(f"  ✓ {result['user_id']}: {result['behavior_description']}")
                
                # 批量写入数据库
                write_to_postgres(results)
                
                logger.info(f"✅ 批次处理完成")
                
                # 清空缓冲区
                events_buffer = []
                last_flush_time = time.time()
            else:
                # 没有触发条件，短暂休眠
                time.sleep(1)
        
        except KeyboardInterrupt:
            logger.info("⏹️  收到停止信号，正在退出...")
            break
        except Exception as e:
            logger.error(f"❌ 处理记录时出错: {e}")
            import traceback
            traceback.print_exc()
            time.sleep(5)


# Script entry point: start the polling loop only when run directly,
# not when this module is imported.
if __name__ == '__main__':
    main()

