package com.flink.hbase.kafka2elasticsearch;

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Properties;

/**
 * Kafka-to-Elasticsearch data pipeline job.
 * Consumes JSON records from Kafka, converts them into {@code UserDocument}
 * objects, and writes them to Elasticsearch.
 */
public class Kafka2ElasticsearchJob {
    private static final Logger LOG = LoggerFactory.getLogger(Kafka2ElasticsearchJob.class);
    
    public static void main(String[] args) throws Exception {
        // 解析参数
        ParameterTool params = ParameterTool.fromArgs(args);
        
        // 验证必要参数
        validateParameters(params);
        
        // 创建执行环境
        StreamExecutionEnvironment env = createExecutionEnvironment(params);
        
        // 创建 Kafka Source
        KafkaSource<String> kafkaSource = createKafkaSource(params);
        
        // 创建数据流
        DataStream<String> kafkaStream = env.fromSource(kafkaSource, 
                WatermarkStrategy.noWatermarks(), "Kafka Source");
        
        // 转换 JSON 消息为 UserDocument
        DataStream<UserDocument> userDocumentStream = kafkaStream
            .process(createMessageConverter(params))
            .name("JSON Message Converter")
            .uid("json-converter");
        
        // 数据清洗和过滤
        DataStream<UserDocument> cleanedStream = userDocumentStream
            .filter(doc -> doc != null && doc.isValid())
            .name("Data Validation")
            .uid("data-validation");
        
        // 写入到 Elasticsearch
        SinkFunction<UserDocument> elasticsearchSink = createElasticsearchSink(params);
        cleanedStream.addSink(elasticsearchSink)
            .name("Elasticsearch Sink")
            .uid("es-sink");
        
        // 可选：添加监控输出
        if (params.getBoolean("monitoring.enabled", false)) {
            addMonitoringOutput(cleanedStream, params);
        }
        
        // 执行作业
        env.execute("Kafka to Elasticsearch Job");
    }
    
    /**
     * 验证必要参数
     */
    private static void validateParameters(ParameterTool params) {
        String[] requiredParams = {
            "kafka.bootstrap.servers",
            "kafka.topic",
            "kafka.group.id",
            "es.hosts",
            "es.index"
        };
        
        for (String param : requiredParams) {
            if (!params.has(param)) {
                throw new IllegalArgumentException("Missing required parameter: " + param);
            }
        }
        
        LOG.info("Parameter validation completed successfully");
    }
    
    /**
     * 创建执行环境
     */
    private static StreamExecutionEnvironment createExecutionEnvironment(ParameterTool params) {
        Configuration config = new Configuration();
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(config);
        
        // 设置全局参数
        env.getConfig().setGlobalJobParameters(params);
        
        // 设置并行度
        int parallelism = params.getInt("parallelism", 4);
        env.setParallelism(parallelism);
        
        // 设置重启策略
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(
            params.getInt("restart.attempts", 3),
            org.apache.flink.api.common.time.Time.seconds(params.getInt("restart.delay", 10))
        ));
        
        // 启用检查点
        if (params.getBoolean("checkpoint.enabled", true)) {
            long checkpointInterval = params.getLong("checkpoint.interval", 60000);
            env.enableCheckpointing(checkpointInterval, CheckpointingMode.EXACTLY_ONCE);
            env.getCheckpointConfig().setMinPauseBetweenCheckpoints(5000);
            env.getCheckpointConfig().setCheckpointTimeout(300000);
            env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);
        }
        
        // 设置状态后端
        env.setStateBackend(new HashMapStateBackend());
        
        LOG.info("Execution environment created with parallelism: {}", parallelism);
        return env;
    }
    
    /**
     * 创建 Kafka Source
     */
    private static KafkaSource<String> createKafkaSource(ParameterTool params) {
        String bootstrapServers = params.get("kafka.bootstrap.servers");
        String topic = params.get("kafka.topic");
        String groupId = params.get("kafka.group.id");
        
        // 创建 Kafka Source
        KafkaSource<String> source = KafkaSource.<String>builder()
            .setBootstrapServers(bootstrapServers)
            .setTopics(topic)
            .setGroupId(groupId)
            .setStartingOffsets(getOffsetsInitializer(params))
            .setValueOnlyDeserializer(new SimpleStringSchema())
            .setProperties(createKafkaProperties(params))
            .build();
        
        LOG.info("Kafka source created for topic: {}, group: {}", topic, groupId);
        return source;
    }
    
    /**
     * 获取 Kafka 偏移量初始化器
     */
    private static OffsetsInitializer getOffsetsInitializer(ParameterTool params) {
        String startingOffsets = params.get("kafka.starting.offsets", "latest");
        
        switch (startingOffsets.toLowerCase()) {
            case "earliest":
                return OffsetsInitializer.earliest();
            case "latest":
                return OffsetsInitializer.latest();
            case "committed":
                return OffsetsInitializer.committedOffsets();
            default:
                LOG.warn("Unknown starting offsets: {}, using latest", startingOffsets);
                return OffsetsInitializer.latest();
        }
    }
    
    /**
     * 创建 Kafka 属性
     */
    private static Properties createKafkaProperties(ParameterTool params) {
        Properties props = new Properties();
        
        // 基本配置
        props.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, params.get("kafka.auto.offset.reset", "latest"));
        props.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        
        // 性能配置
        props.setProperty(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, params.get("kafka.fetch.min.bytes", "1"));
        props.setProperty(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, params.get("kafka.fetch.max.wait.ms", "500"));
        props.setProperty(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, params.get("kafka.max.poll.records", "500"));
        props.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, params.get("kafka.session.timeout.ms", "30000"));
        props.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, params.get("kafka.heartbeat.interval.ms", "3000"));
        
        // 安全配置
        if (params.has("kafka.security.protocol")) {
            props.setProperty("security.protocol", params.get("kafka.security.protocol"));
        }
        if (params.has("kafka.sasl.mechanism")) {
            props.setProperty("sasl.mechanism", params.get("kafka.sasl.mechanism"));
        }
        if (params.has("kafka.sasl.jaas.config")) {
            props.setProperty("sasl.jaas.config", params.get("kafka.sasl.jaas.config"));
        }
        
        return props;
    }
    
    /**
     * 创建消息转换器
     */
    private static JsonMessageConverter createMessageConverter(ParameterTool params) {
        boolean enableValidation = params.getBoolean("converter.validation.enabled", true);
        
        if (enableValidation) {
            return JsonMessageConverter.createValidating();
        } else {
            return JsonMessageConverter.create();
        }
    }
    
    /**
     * 创建 Elasticsearch Sink
     */
    private static SinkFunction<UserDocument> createElasticsearchSink(ParameterTool params) {
        String indexName = params.get("es.index");
        int batchSize = params.getInt("es.batch.size", 100);
        long flushInterval = params.getLong("es.flush.interval", 5000);
        boolean asyncMode = params.getBoolean("es.async.mode", false);
        
        // 创建 ES 配置
        Map<String, Object> esConfig = createElasticsearchConfig(params);
        
        ElasticsearchSink.Builder builder = ElasticsearchSink.builder()
            .indexName(indexName)
            .batchSize(batchSize)
            .flushInterval(flushInterval)
            .config(esConfig);
        
        if (asyncMode) {
            return builder.buildAsync();
        } else {
            return builder.build();
        }
    }
    
    /**
     * 创建 Elasticsearch 配置
     */
    private static Map<String, Object> createElasticsearchConfig(ParameterTool params) {
        Map<String, Object> config = new HashMap<>();
        
        // 基本配置
        config.put("hosts", params.get("es.hosts"));
        config.put("scheme", params.get("es.scheme", "http"));
        config.put("date.partitioned", params.getBoolean("es.date.partitioned", false));
        
        // 认证配置
        if (params.has("es.username")) {
            config.put("username", params.get("es.username"));
        }
        if (params.has("es.password")) {
            config.put("password", params.get("es.password"));
        }
        
        // 超时配置
        config.put("connect.timeout", params.getInt("es.connect.timeout", 5000));
        config.put("socket.timeout", params.getInt("es.socket.timeout", 60000));
        config.put("connection.request.timeout", params.getInt("es.connection.request.timeout", 5000));
        
        return config;
    }
    
    /**
     * 添加监控输出
     */
    private static void addMonitoringOutput(DataStream<UserDocument> stream, ParameterTool params) {
        String monitoringType = params.get("monitoring.type", "log");
        
        if ("log".equalsIgnoreCase(monitoringType)) {
            stream.map(doc -> {
                LOG.info("Processed document: userId={}, name={}, timestamp={}", 
                        doc.getUserId(), doc.getName(), doc.getCreateTime());
                return doc;
            }).name("Monitoring Logger").uid("monitoring-logger");
        } else if ("metrics".equalsIgnoreCase(monitoringType)) {
            // 可以添加自定义指标
            stream.map(doc -> {
                // 添加指标逻辑
                return doc;
            }).name("Metrics Reporter").uid("metrics-reporter");
        }
    }
    
    /**
     * 打印使用说明
     */
    public static void printUsage() {
        System.out.println("Usage: Kafka2ElasticsearchJob [OPTIONS]");
        System.out.println("Required parameters:");
        System.out.println("  --kafka.bootstrap.servers <servers>   Kafka bootstrap servers");
        System.out.println("  --kafka.topic <topic>                 Kafka topic name");
        System.out.println("  --kafka.group.id <group>              Kafka consumer group ID");
        System.out.println("  --es.hosts <hosts>                    Elasticsearch hosts");
        System.out.println("  --es.index <index>                    Elasticsearch index name");
        System.out.println("");
        System.out.println("Optional parameters:");
        System.out.println("  --parallelism <number>                Parallelism (default: 4)");
        System.out.println("  --kafka.starting.offsets <offsets>    Starting offsets (default: latest)");
        System.out.println("  --kafka.auto.offset.reset <reset>     Auto offset reset (default: latest)");
        System.out.println("  --es.batch.size <size>                ES batch size (default: 100)");
        System.out.println("  --es.flush.interval <ms>              ES flush interval (default: 5000)");
        System.out.println("  --es.async.mode <boolean>             Enable ES async mode (default: false)");
        System.out.println("  --es.date.partitioned <boolean>       Enable date partitioning (default: false)");
        System.out.println("  --converter.validation.enabled <bool> Enable data validation (default: true)");
        System.out.println("  --monitoring.enabled <boolean>        Enable monitoring (default: false)");
        System.out.println("  --checkpoint.enabled <boolean>        Enable checkpointing (default: true)");
        System.out.println("");
        System.out.println("Example:");
        System.out.println("  java -cp flink-hbase-1.0-SNAPSHOT.jar com.flink.hbase.kafka2elasticsearch.Kafka2ElasticsearchJob \\");
        System.out.println("    --kafka.bootstrap.servers localhost:9092 \\");
        System.out.println("    --kafka.topic user_events \\");
        System.out.println("    --kafka.group.id flink-es-consumer \\");
        System.out.println("    --es.hosts localhost:9200 \\");
        System.out.println("    --es.index user_documents \\");
        System.out.println("    --parallelism 4 \\");
        System.out.println("    --es.batch.size 200 \\");
        System.out.println("    --es.async.mode true \\");
        System.out.println("    --monitoring.enabled true");
    }
} 