package com.flink.hbase.elasticsearch2kafka;

import com.flink.hbase.kafka2elasticsearch.UserDocument;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.base.DeliveryGuarantee;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema;
import org.apache.flink.connector.kafka.sink.KafkaSink;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.util.Collector;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
import java.util.Properties;

/**
 * Elasticsearch 到 Kafka 数据管道作业
 * 从 ES 多线程读取数据，转换后发送到 Kafka 队列
 */
public class Elasticsearch2KafkaJob {
    private static final Logger LOG = LoggerFactory.getLogger(Elasticsearch2KafkaJob.class);
    
    public static void main(String[] args) throws Exception {
        // 解析参数
        ParameterTool params = ParameterTool.fromArgs(args);
        
        // 验证必要参数
        validateParameters(params);
        
        // 创建执行环境
        StreamExecutionEnvironment env = createExecutionEnvironment(params);
        
        // 创建 ES 配置
        Map<String, Object> esConfig = createElasticsearchConfig(params);
        
        // 创建数据流
        DataStream<UserDocument> userDocumentStream = createUserDocumentStream(env, params, esConfig);
        
        // 转换为 Kafka 消息
        DataStream<Tuple2<String, String>> kafkaMessageStream = userDocumentStream
            .map(createDocumentConverter(params))
            .name("Document to Kafka Converter")
            .uid("doc-kafka-converter");
        
        // 过滤空消息
        DataStream<Tuple2<String, String>> validMessageStream = kafkaMessageStream
            .filter(tuple -> tuple != null && tuple.f0 != null && tuple.f1 != null)
            .name("Message Validation")
            .uid("message-validation");
        
        // 发送到 Kafka
        sendToKafka(validMessageStream, params);
        
        // 可选：添加监控输出
        if (params.getBoolean("monitoring.enabled", false)) {
            addMonitoringOutput(validMessageStream, params);
        }
        
        // 执行作业
        env.execute("Elasticsearch to Kafka Job");
    }
    
    /**
     * 验证必要参数
     */
    private static void validateParameters(ParameterTool params) {
        String[] requiredParams = {
            "es.hosts",
            "es.indices",
            "kafka.bootstrap.servers",
            "kafka.topic"
        };
        
        for (String param : requiredParams) {
            if (!params.has(param)) {
                throw new IllegalArgumentException("Missing required parameter: " + param);
            }
        }
        
        LOG.info("Parameter validation completed successfully");
    }
    
    /**
     * 创建执行环境
     */
    private static StreamExecutionEnvironment createExecutionEnvironment(ParameterTool params) {
        Configuration config = new Configuration();
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(config);
        
        // 设置全局参数
        env.getConfig().setGlobalJobParameters(params);
        
        // 设置并行度
        int parallelism = params.getInt("parallelism", 4);
        env.setParallelism(parallelism);
        
        // 设置重启策略
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(
            params.getInt("restart.attempts", 3),
            org.apache.flink.api.common.time.Time.seconds(params.getInt("restart.delay", 10))
        ));
        
        // 启用检查点
        if (params.getBoolean("checkpoint.enabled", true)) {
            long checkpointInterval = params.getLong("checkpoint.interval", 60000);
            env.enableCheckpointing(checkpointInterval, CheckpointingMode.EXACTLY_ONCE);
            env.getCheckpointConfig().setMinPauseBetweenCheckpoints(5000);
            env.getCheckpointConfig().setCheckpointTimeout(300000);
            env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);
        }
        
        // 设置状态后端
        env.setStateBackend(new HashMapStateBackend());
        
        LOG.info("Execution environment created with parallelism: {}", parallelism);
        return env;
    }
    
    /**
     * 创建 Elasticsearch 配置
     */
    private static Map<String, Object> createElasticsearchConfig(ParameterTool params) {
        Map<String, Object> config = new HashMap<>();
        
        // 基本配置
        config.put("hosts", params.get("es.hosts"));
        config.put("scheme", params.get("es.scheme", "http"));
        
        // 认证配置
        if (params.has("es.username")) {
            config.put("username", params.get("es.username"));
        }
        if (params.has("es.password")) {
            config.put("password", params.get("es.password"));
        }
        
        // 超时配置
        config.put("connect.timeout", params.getInt("es.connect.timeout", 5000));
        config.put("socket.timeout", params.getInt("es.socket.timeout", 60000));
        config.put("connection.request.timeout", params.getInt("es.connection.request.timeout", 5000));
        
        LOG.info("Elasticsearch configuration created with {} properties", config.size());
        return config;
    }
    
    /**
     * 创建用户文档数据流
     */
    private static DataStream<UserDocument> createUserDocumentStream(StreamExecutionEnvironment env, 
                                                                   ParameterTool params, 
                                                                   Map<String, Object> esConfig) {
        
        // ES Source 配置
        String indicesStr = params.get("es.indices");
        String[] indices = indicesStr.split(",");
        for (int i = 0; i < indices.length; i++) {
            indices[i] = indices[i].trim();
        }
        
        String query = params.get("es.query", "");
        int scrollSize = params.getInt("es.scroll.size", 1000);
        int scrollTimeoutMs = params.getInt("es.scroll.timeout", 60000);
        int readThreads = params.getInt("es.read.threads", 4);
        
        // 创建 ES Source
        UserDocumentSource source = UserDocumentSource.create(
            indices, query, scrollSize, scrollTimeoutMs, readThreads, esConfig
        );
        
        // 添加到数据流
        DataStream<UserDocument> userStream = env.addSource(source)
            .name("Elasticsearch Multithreaded Source")
            .uid("es-source");
        
        LOG.info("User document stream created from indices: {}", indicesStr);
        return userStream;
    }
    
    /**
     * 创建文档转换器
     */
    private static DocumentToKafkaConverter createDocumentConverter(ParameterTool params) {
        String messageFormat = params.get("converter.message.format", "json");
        boolean includeMetadata = params.getBoolean("converter.include.metadata", true);
        String timestampField = params.get("converter.timestamp.field", "update_time");
        boolean enableEnhancement = params.getBoolean("converter.enhancement.enabled", false);
        
        if (enableEnhancement) {
            return DocumentToKafkaConverter.createEnhanced(messageFormat, includeMetadata, timestampField);
        } else {
            return DocumentToKafkaConverter.create(messageFormat, includeMetadata, timestampField);
        }
    }
    
    /**
     * 发送到 Kafka
     */
    private static void sendToKafka(DataStream<Tuple2<String, String>> messageStream, ParameterTool params) {
        String bootstrapServers = params.get("kafka.bootstrap.servers");
        String topic = params.get("kafka.topic");
        
        // 创建 Kafka Sink
        KafkaSink<Tuple2<String, String>> kafkaSink = KafkaSink.<Tuple2<String, String>>builder()
            .setBootstrapServers(bootstrapServers)
            .setRecordSerializer(KafkaRecordSerializationSchema.builder()
                .setTopic(topic)
                .setKeySerializationSchema(new SimpleStringSchema())
                .setValueSerializationSchema(new SimpleStringSchema())
                .build())
            .setDeliveryGuarantee(getDeliveryGuarantee(params))
            .setKafkaProducerConfig(createKafkaProducerProperties(params))
            .build();
        
        // 转换为 ProducerRecord 格式并发送
        messageStream
            .process(new ProcessFunction<Tuple2<String, String>, Tuple2<String, String>>() {
                @Override
                public void processElement(Tuple2<String, String> value, Context ctx, Collector<Tuple2<String, String>> out) throws Exception {
                    out.collect(value);
                }
            })
            .sinkTo(kafkaSink)
            .name("Kafka Sink")
            .uid("kafka-sink");
        
        LOG.info("Kafka sink configured for topic: {}", topic);
    }
    
    /**
     * 获取投递保证级别
     */
    private static DeliveryGuarantee getDeliveryGuarantee(ParameterTool params) {
        String guarantee = params.get("kafka.delivery.guarantee", "at_least_once");
        
        switch (guarantee.toLowerCase()) {
            case "exactly_once":
                return DeliveryGuarantee.EXACTLY_ONCE;
            case "at_least_once":
                return DeliveryGuarantee.AT_LEAST_ONCE;
            case "none":
                return DeliveryGuarantee.NONE;
            default:
                LOG.warn("Unknown delivery guarantee: {}, using at_least_once", guarantee);
                return DeliveryGuarantee.AT_LEAST_ONCE;
        }
    }
    
    /**
     * 创建 Kafka 生产者属性
     */
    private static Properties createKafkaProducerProperties(ParameterTool params) {
        Properties props = new Properties();
        
        // 基本配置
        props.setProperty(ProducerConfig.ACKS_CONFIG, params.get("kafka.acks", "all"));
        props.setProperty(ProducerConfig.RETRIES_CONFIG, params.get("kafka.retries", "3"));
        props.setProperty(ProducerConfig.BATCH_SIZE_CONFIG, params.get("kafka.batch.size", "16384"));
        props.setProperty(ProducerConfig.LINGER_MS_CONFIG, params.get("kafka.linger.ms", "1"));
        props.setProperty(ProducerConfig.BUFFER_MEMORY_CONFIG, params.get("kafka.buffer.memory", "33554432"));
        
        // 压缩配置
        props.setProperty(ProducerConfig.COMPRESSION_TYPE_CONFIG, params.get("kafka.compression.type", "snappy"));
        
        // 超时配置
        props.setProperty(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, params.get("kafka.request.timeout.ms", "30000"));
        props.setProperty(ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG, params.get("kafka.delivery.timeout.ms", "120000"));
        
        // 安全配置
        if (params.has("kafka.security.protocol")) {
            props.setProperty("security.protocol", params.get("kafka.security.protocol"));
        }
        if (params.has("kafka.sasl.mechanism")) {
            props.setProperty("sasl.mechanism", params.get("kafka.sasl.mechanism"));
        }
        if (params.has("kafka.sasl.jaas.config")) {
            props.setProperty("sasl.jaas.config", params.get("kafka.sasl.jaas.config"));
        }
        
        return props;
    }
    
    /**
     * 添加监控输出
     */
    private static void addMonitoringOutput(DataStream<Tuple2<String, String>> stream, ParameterTool params) {
        String monitoringType = params.get("monitoring.type", "log");
        
        if ("log".equalsIgnoreCase(monitoringType)) {
            stream.map(tuple -> {
                LOG.info("Processed Kafka message: key={}, value_length={}", 
                        tuple.f0, tuple.f1 != null ? tuple.f1.length() : 0);
                return tuple;
            }).name("Monitoring Logger").uid("monitoring-logger");
        } else if ("metrics".equalsIgnoreCase(monitoringType)) {
            // 可以添加自定义指标
            stream.map(tuple -> {
                // 添加指标逻辑
                return tuple;
            }).name("Metrics Reporter").uid("metrics-reporter");
        }
    }
    
    /**
     * 打印使用说明
     */
    public static void printUsage() {
        System.out.println("Usage: Elasticsearch2KafkaJob [OPTIONS]");
        System.out.println("Required parameters:");
        System.out.println("  --es.hosts <hosts>                    Elasticsearch hosts");
        System.out.println("  --es.indices <indices>               Elasticsearch indices (comma-separated)");
        System.out.println("  --kafka.bootstrap.servers <servers>  Kafka bootstrap servers");
        System.out.println("  --kafka.topic <topic>                Kafka topic name");
        System.out.println("");
        System.out.println("Optional parameters:");
        System.out.println("  --parallelism <number>               Parallelism (default: 4)");
        System.out.println("  --es.query <query>                   Elasticsearch query (default: match_all)");
        System.out.println("  --es.scroll.size <size>              Scroll size (default: 1000)");
        System.out.println("  --es.scroll.timeout <ms>             Scroll timeout (default: 60000)");
        System.out.println("  --es.read.threads <threads>          Read threads per task (default: 4)");
        System.out.println("  --converter.message.format <format>  Message format (json/avro/custom, default: json)");
        System.out.println("  --converter.include.metadata <bool>  Include metadata (default: true)");
        System.out.println("  --converter.enhancement.enabled <bool> Enable data enhancement (default: false)");
        System.out.println("  --kafka.acks <acks>                  Kafka acks (default: all)");
        System.out.println("  --kafka.delivery.guarantee <level>   Delivery guarantee (default: at_least_once)");
        System.out.println("  --monitoring.enabled <boolean>       Enable monitoring (default: false)");
        System.out.println("  --checkpoint.enabled <boolean>       Enable checkpointing (default: true)");
        System.out.println("");
        System.out.println("Example:");
        System.out.println("  java -cp flink-hbase-1.0-SNAPSHOT.jar com.flink.hbase.elasticsearch2kafka.Elasticsearch2KafkaJob \\");
        System.out.println("    --es.hosts localhost:9200 \\");
        System.out.println("    --es.indices user_documents \\");
        System.out.println("    --kafka.bootstrap.servers localhost:9092 \\");
        System.out.println("    --kafka.topic user_events \\");
        System.out.println("    --parallelism 8 \\");
        System.out.println("    --es.read.threads 6 \\");
        System.out.println("    --converter.enhancement.enabled true \\");
        System.out.println("    --monitoring.enabled true");
    }
} 