#!/usr/bin/env python3
"""
Flink Agents 集群模式入口
使用真正的 Flink DataStream API + 集群执行

仅支持批量模式（batch）：处理完 Kinesis 中的所有数据后退出
- 适合批量数据处理、数据回填、定时任务等场景
- 由于 PyFlink 1.20 Kinesis DataStream API 限制，暂不支持持续流式消费
- 详见 docs/architecture.md 中的技术说明
"""
import os
import json
import logging
from typing import Dict, Any

# PyFlink imports
from pyflink.common import Configuration
from pyflink.datastream import StreamExecutionEnvironment, RuntimeExecutionMode

# Flink Agents imports
from flink_agents.api.execution_environment import AgentsExecutionEnvironment

# Import the shared Agent implementation
from agents.behavior_agent import BehaviorAnalysisAgent

# Configure logging: timestamped INFO-level output for the whole module.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
# Module-level logger, shared by the sink function and main().
logger = logging.getLogger(__name__)


def write_to_postgres(value):
    """Insert one analysis result into the user_behaviors table (cluster-mode sink).

    Best-effort: any failure is logged (with traceback) and swallowed so a single
    bad record does not fail the whole job. Always returns None.

    Args:
        value: A result record — either a JSON string or a dict. A dict in the
            single-key wrapper format (no top-level 'user_id', exactly one key,
            e.g. ``{"output": {...}}``) is unwrapped first.
    """
    import psycopg2
    try:
        # Records may arrive as JSON strings from the upstream map().
        if isinstance(value, str):
            value = json.loads(value)

        # Unwrap the single-key wrapper format, if present.
        if 'user_id' not in value and len(value) == 1:
            value = next(iter(value.values()))

        conn = psycopg2.connect(
            host=os.getenv('POSTGRES_HOST', 'postgres'),
            port=int(os.getenv('POSTGRES_PORT', 5432)),
            database=os.getenv('POSTGRES_DB', 'behavior_analysis'),
            user=os.getenv('POSTGRES_USER', 'postgres'),
            password=os.getenv('POSTGRES_PASSWORD', 'postgres')
        )
        try:
            insert_query = """
                INSERT INTO user_behaviors (
                    user_id, url, behavior_type, behavior_description,
                    confidence, analysis_method, created_at
                ) VALUES (%s, %s, %s, %s, %s, %s, NOW())
            """
            # Cursor as a context manager closes it even if execute() raises.
            with conn.cursor() as cursor:
                cursor.execute(insert_query, (
                    value['user_id'],
                    value['url'],
                    value['behavior_type'],
                    value['behavior_description'],
                    value['confidence'],
                    value['analysis_method']
                ))
            conn.commit()
        finally:
            # Fix: the original leaked the connection whenever execute()/commit()
            # raised, because close() was only reached on the success path.
            conn.close()

        logger.debug(f"✅ 写入 PostgreSQL: {value['user_id']}")

    except Exception as e:
        # Best-effort contract: log with full traceback, never re-raise.
        logger.exception(f"❌ 写入 PostgreSQL 失败: {e}")


def _log_startup_banner():
    """Log the effective runtime configuration for this cluster-mode run."""
    logger.info("============================================================")
    logger.info("Flink Agents - 集群模式 (RemoteExecutionEnvironment)")
    logger.info("============================================================")
    logger.info("运行模式: BATCH（批量处理后退出）")
    logger.info(f"Flink JobManager: {os.getenv('FLINK_JOBMANAGER', 'flink-jobmanager:8081')}")
    logger.info(f"Kinesis Stream: {os.getenv('KINESIS_STREAM_NAME', 'click-events-stream')}")
    logger.info(f"PostgreSQL: {os.getenv('POSTGRES_HOST', 'postgres')}:{os.getenv('POSTGRES_PORT', 5432)}")
    logger.info(f"LLM: {os.getenv('LLM_PROVIDER', 'deepseek')} - {os.getenv('LLM_MODEL', 'deepseek-chat')}")
    logger.info("============================================================")


def _create_flink_env():
    """Build a StreamExecutionEnvironment configured against the remote JobManager.

    Returns:
        A PyFlink StreamExecutionEnvironment in STREAMING mode with parallelism
        taken from FLINK_PARALLELISM (default 2).

    Raises:
        Re-raises any failure from environment creation after logging it.
    """
    logger.info("📝 Step 1: 配置 Flink 集群连接...")

    config = Configuration()

    # JobManager REST address (used for job submission); host[:port], port defaults to 8081.
    jobmanager_address = os.getenv('FLINK_JOBMANAGER', 'flink-jobmanager:8081')
    jobmanager_host = jobmanager_address.split(':')[0]
    jobmanager_port = jobmanager_address.split(':')[1] if ':' in jobmanager_address else '8081'

    config.set_string("rest.address", jobmanager_host)
    config.set_string("rest.port", jobmanager_port)
    config.set_string("jobmanager.rpc.address", jobmanager_host)
    config.set_string("jobmanager.rpc.port", "6123")  # Flink's default RPC port

    logger.info(f"📝 JobManager REST: {jobmanager_host}:{jobmanager_port}")
    logger.info(f"📝 JobManager RPC: {jobmanager_host}:6123")

    logger.info("📝 Step 2: 创建 StreamExecutionEnvironment...")
    try:
        env = StreamExecutionEnvironment.get_execution_environment(config)
        logger.info("📝 Step 3: 设置运行模式和并行度...")
        # NOTE(review): the banner and module docstring advertise BATCH, but the
        # runtime mode is STREAMING. The bounded from_collection source still makes
        # the job terminate, so behavior is batch-like — confirm intent before changing.
        env.set_runtime_mode(RuntimeExecutionMode.STREAMING)
        env.set_parallelism(int(os.getenv('FLINK_PARALLELISM', 2)))
        logger.info("✅ PyFlink StreamExecutionEnvironment 已创建（集群模式）")
        return env
    except Exception as e:
        logger.error(f"❌ 创建 StreamExecutionEnvironment 失败: {e}")
        import traceback
        traceback.print_exc()
        raise


def _mock_events():
    """Return the fallback events used when Kinesis is empty or unreachable."""
    return [
        {"value": {"userId": "test_user_cluster_1", "eventType": "click", "url": "http://example.com/cluster/1"}},
        {"value": {"userId": "test_user_cluster_2", "eventType": "click", "url": "http://example.com/cluster/2"}},
    ]


def _read_kinesis_events():
    """Drain every currently-available record from the Kinesis stream.

    Iterates all shards from TRIM_HORIZON and collects records until a shard
    returns an empty batch (treated as "drained" for this batch-mode job).

    Returns:
        A list of ``{"value": event_dict}`` items; falls back to mock events
        when the stream is empty or the read fails.
    """
    logger.info("📝 批量读取 Kinesis 现有数据...")
    import boto3
    from botocore.exceptions import ClientError

    kinesis_client = boto3.client(
        'kinesis',
        endpoint_url=os.getenv('AWS_ENDPOINT', 'http://localstack:4566'),
        region_name=os.getenv('AWS_REGION', 'us-east-1'),
        aws_access_key_id='test',
        aws_secret_access_key='test'
    )
    stream_name = os.getenv('KINESIS_STREAM_NAME', 'click-events-stream')

    events_to_process = []
    try:
        response = kinesis_client.describe_stream(StreamName=stream_name)
        shards = response['StreamDescription']['Shards']
        logger.info(f"📝 找到 {len(shards)} 个 shards")

        for shard in shards:
            # Start from the oldest available record in this shard.
            iterator_response = kinesis_client.get_shard_iterator(
                StreamName=stream_name,
                ShardId=shard['ShardId'],
                ShardIteratorType='TRIM_HORIZON'
            )
            shard_iterator = iterator_response['ShardIterator']

            while shard_iterator:
                records_response = kinesis_client.get_records(
                    ShardIterator=shard_iterator,
                    Limit=100
                )
                records = records_response['Records']
                for record in records:
                    events_to_process.append({"value": json.loads(record['Data'])})

                shard_iterator = records_response.get('NextShardIterator')
                # NOTE(review): an empty batch is treated as end-of-data; Kinesis
                # may legitimately return empty batches mid-stream (check
                # MillisBehindLatest) — acceptable for a localstack batch drain.
                if not records:
                    break

        logger.info(f"✅ 从 Kinesis 读取到 {len(events_to_process)} 条事件")

        if not events_to_process:
            logger.warning("⚠️  Kinesis 中没有数据，使用模拟数据")
            events_to_process = _mock_events()

    except ClientError as e:
        logger.error(f"❌ 读取 Kinesis 失败: {e}")
        logger.warning("⚠️  使用模拟数据")
        events_to_process = _mock_events()

    return events_to_process


def main():
    """Cluster-mode entry point (batch only).

    Pipeline: drain Kinesis into a bounded collection → keyed DataStream →
    BehaviorAnalysisAgent → JSON → best-effort PostgreSQL writes → submit to
    the remote Flink cluster and wait for completion.
    """
    _log_startup_banner()

    # ====== 1. Flink environment connected to the cluster ======
    env = _create_flink_env()

    # ====== 2. Bounded batch of events drained from Kinesis ======
    events_to_process = _read_kinesis_events()

    try:
        source_stream = env.from_collection(collection=events_to_process)
        logger.info("✅ 批量数据源已创建（处理完后退出）")
    except Exception as e:
        logger.error(f"❌ 创建数据源失败: {e}")
        import traceback
        traceback.print_exc()
        raise

    logger.info("✅ Kinesis 数据源已配置")

    # ====== 3. Flink Agents environment layered on the Flink env ======
    logger.info("📝 创建 Flink Agents 环境...")
    agents_env = AgentsExecutionEnvironment.get_execution_environment(env=env)
    logger.info("✅ Flink Agents 环境已创建（集群模式）")

    # ====== 4. Apply the agent via from_datastream, keyed by userId ======
    logger.info("📝 应用 Agent 到 DataStream...")
    output_stream = (
        agents_env
        .from_datastream(
            input=source_stream,
            key_selector=lambda x: x.get('value', {}).get('userId', 'unknown')
        )
        .apply(BehaviorAnalysisAgent())
        .to_datastream()
    )
    logger.info("✅ Agent 已应用到 DataStream")

    # ====== 5. Sink: serialize to JSON, then write each record to PostgreSQL ======
    output_json_stream = output_stream.map(
        lambda x: json.dumps(x) if not isinstance(x, str) else x
    )

    logger.info("📝 添加 PostgreSQL Sink...")
    # NOTE(review): map() with a side-effecting function that returns None is used
    # as a poor-man's sink; a real SinkFunction would be cleaner — confirm before changing.
    output_json_stream.map(write_to_postgres)
    logger.info("✅ PostgreSQL Sink 已添加")

    # ====== 6. Submit the job to the cluster and block until it finishes ======
    logger.info("🚀 准备提交作业到 Flink 集群...")
    logger.info("📝 调用 agents_env.execute()...")
    try:
        agents_env.execute()  # flink-agents' execute() takes no arguments
        logger.info("✅ agents_env.execute() 返回成功")
    except Exception as e:
        logger.error(f"❌ agents_env.execute() 失败: {e}")
        import traceback
        traceback.print_exc()
        raise

    logger.info("✅ 作业已完成")


# Script entry point: submit the batch job when run directly (not on import).
if __name__ == '__main__':
    main()

