package com.flink.hbase.kafka2elasticsearch.timepartition;

import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.time.Duration;
import java.util.*;
import java.util.concurrent.TimeUnit;

/**
 * Kafka 到 Elasticsearch 时间分区作业
 * 支持按业务时间创建月度索引分区和别名管理
 */
public class KafkaToElasticsearchTimePartitionJob {
    private static final Logger LOG = LoggerFactory.getLogger(KafkaToElasticsearchTimePartitionJob.class);
    
    private final JobConfig jobConfig;
    private StreamExecutionEnvironment env;
    
    /**
     * Creates the job with the supplied configuration.
     *
     * @param jobConfig job-level settings (Kafka source, ES sink, parallelism, checkpointing)
     */
    public KafkaToElasticsearchTimePartitionJob(JobConfig jobConfig) {
        this.jobConfig = jobConfig;
    }
    
    /**
     * 运行作业
     */
    /**
     * Builds and executes the pipeline: Kafka source → business-time extraction
     * → time-partitioned Elasticsearch sink.
     *
     * @throws Exception if job construction or execution fails
     */
    public void run() throws Exception {
        LOG.info("Starting Kafka to Elasticsearch Time Partition Job");

        // Set up environment (parallelism, checkpointing, state backend).
        initializeStreamingEnvironment();

        // Configure restart strategy.
        configureRestartStrategy();

        // Create the Kafka source.
        KafkaSource<String> kafkaSource = createKafkaSource();

        // Build the stream. setParallelism must be called on the
        // DataStreamSource returned by fromSource; the original assigned to
        // DataStream first and then called setParallelism, which does not
        // compile (DataStream has no setParallelism).
        DataStream<String> kafkaStream = env
                .fromSource(kafkaSource, WatermarkStrategy.noWatermarks(), "Kafka-Source")
                .setParallelism(jobConfig.getKafkaParallelism());

        // Extract business time and tag each message with its partition.
        BusinessTimeExtractor timeExtractor = createTimeExtractor();
        DataStream<BusinessTimeExtractor.TimePartitionedMessage> timePartitionedStream =
            kafkaStream.process(new BusinessTimeExtractor.TimeExtractionFunction(timeExtractor, jobConfig.getIndexName()))
                      .setParallelism(jobConfig.getProcessingParallelism())
                      .name("Time-Extraction");

        // Create the Elasticsearch sink.
        SinkFunction<BusinessTimeExtractor.TimePartitionedMessage> esSink = createElasticsearchSink();

        // Write to Elasticsearch.
        timePartitionedStream.addSink(esSink)
                           .setParallelism(jobConfig.getEsParallelism())
                           .name("ES-Sink");

        // Submit the job.
        env.execute("Kafka-to-ES-TimePartition-Job");
    }
    
    /**
     * 初始化流处理环境
     */
    /**
     * Initializes the streaming environment: network-buffer hints, global
     * parallelism, checkpointing, and an optional state backend.
     */
    private void initializeStreamingEnvironment() {
        // Network buffer tuning. NOTE(review): these are TaskManager-level
        // options, so they only take effect when the environment is created
        // from this Configuration (local/mini-cluster execution); on a real
        // cluster they must be set in flink-conf.yaml. The original code
        // built this Configuration but never used it at all.
        Configuration config = new Configuration();
        config.setString("taskmanager.network.memory.fraction", "0.2");
        config.setString("taskmanager.network.memory.min", "128mb");
        config.setString("taskmanager.network.memory.max", "1gb");

        env = StreamExecutionEnvironment.getExecutionEnvironment(config);

        // Default parallelism for operators without an explicit setting.
        env.setParallelism(jobConfig.getGlobalParallelism());

        // Checkpointing; at most one checkpoint in flight at a time.
        if (jobConfig.isCheckpointEnabled()) {
            env.enableCheckpointing(jobConfig.getCheckpointInterval());
            env.getCheckpointConfig().setMinPauseBetweenCheckpoints(jobConfig.getCheckpointMinPause());
            env.getCheckpointConfig().setCheckpointTimeout(jobConfig.getCheckpointTimeout());
            env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);
        }

        // Optional state backend (e.g. RocksDB for large state).
        if (jobConfig.getStateBackend() != null) {
            env.setStateBackend(jobConfig.getStateBackend());
        }

        LOG.info("Streaming environment initialized with parallelism: {}", jobConfig.getGlobalParallelism());
    }
    
    /**
     * 配置重启策略
     */
    /**
     * Applies a fixed-delay restart strategy from the job configuration.
     */
    private void configureRestartStrategy() {
        final int maxAttempts = jobConfig.getRestartAttempts();
        final long delayMillis = jobConfig.getRestartDelay();
        env.setRestartStrategy(
            RestartStrategies.fixedDelayRestart(
                maxAttempts,
                org.apache.flink.api.common.time.Time.of(delayMillis, TimeUnit.MILLISECONDS)));
    }
    
    /**
     * 创建 Kafka 数据源
     */
    /**
     * Builds the KafkaSource for the configured topics.
     *
     * <p>Tuning defaults below are applied with {@code putIfAbsent} so that
     * user-supplied properties from the job config always win; the original
     * unconditionally overwrote them.
     */
    private KafkaSource<String> createKafkaSource() {
        LOG.info("Creating Kafka source for topics: {}", String.join(", ", jobConfig.getKafkaTopics()));

        // Start from the user-supplied consumer properties.
        Properties kafkaProps = new Properties();
        kafkaProps.putAll(jobConfig.getKafkaProperties());

        // Tuning defaults — only used when not already configured by the user.
        kafkaProps.putIfAbsent(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, "1024");
        kafkaProps.putIfAbsent(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "500");
        kafkaProps.putIfAbsent(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "1000");
        kafkaProps.putIfAbsent(ConsumerConfig.RECEIVE_BUFFER_CONFIG, "65536");
        kafkaProps.putIfAbsent(ConsumerConfig.SEND_BUFFER_CONFIG, "131072");
        kafkaProps.putIfAbsent(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000");
        kafkaProps.putIfAbsent(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "3000");
        kafkaProps.putIfAbsent(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, "300000");

        // Resolve the starting-offset strategy (locale-independent lowering).
        OffsetsInitializer offsetsInitializer;
        switch (jobConfig.getKafkaOffsetReset().toLowerCase(Locale.ROOT)) {
            case "earliest":
                offsetsInitializer = OffsetsInitializer.earliest();
                break;
            case "latest":
                offsetsInitializer = OffsetsInitializer.latest();
                break;
            case "committed":
                // Falls back to LATEST when no committed offsets exist.
                offsetsInitializer = OffsetsInitializer.committedOffsets(OffsetResetStrategy.LATEST);
                break;
            default:
                // Surface misconfiguration instead of silently defaulting.
                LOG.warn("Unknown kafka offset reset strategy '{}', defaulting to latest",
                         jobConfig.getKafkaOffsetReset());
                offsetsInitializer = OffsetsInitializer.latest();
        }

        return KafkaSource.<String>builder()
                .setBootstrapServers(jobConfig.getKafkaBootstrapServers())
                .setTopics(jobConfig.getKafkaTopics())
                .setGroupId(jobConfig.getKafkaGroupId())
                .setStartingOffsets(offsetsInitializer)
                .setValueOnlyDeserializer(new SimpleStringSchema())
                .setProperties(kafkaProps)
                .build();
    }
    
    /**
     * 创建时间提取器
     */
    /**
     * Assembles the extractor that resolves each message's business time
     * from the configured time fields, time zone, and time semantics.
     */
    private BusinessTimeExtractor createTimeExtractor() {
        return BusinessTimeExtractor.builder()
                .useProcessingTime(jobConfig.isUseProcessingTime())
                .useEventTime(jobConfig.isUseEventTime())
                .defaultTimeZone(jobConfig.getDefaultTimeZone())
                .timeFields(jobConfig.getTimeFields())
                .build();
    }
    
    /**
     * 创建 Elasticsearch Sink
     */
    /**
     * Builds the time-partitioned Elasticsearch sink together with the index
     * manager responsible for the partitioned indices and their aliases.
     */
    private SinkFunction<BusinessTimeExtractor.TimePartitionedMessage> createElasticsearchSink() {
        LOG.info("Creating Elasticsearch sink for index: {}", jobConfig.getIndexName());

        // Index manager: owns index/alias lifecycle for the partitions.
        final ElasticsearchIndexManager partitionIndexManager =
                ElasticsearchIndexManager.builder()
                        .indexMappings(jobConfig.getIndexMappings())
                        .indexSettings(jobConfig.getIndexSettings())
                        .indexTemplate(jobConfig.getIndexName())
                        .esConfig(jobConfig.getEsConfig())
                        .build();

        // Sink: batches documents and routes each to its time partition.
        return TimePartitionedElasticsearchSink.builder()
                .documentIdField(jobConfig.getDocumentIdField())
                .useAlias(jobConfig.isUseAlias())
                .indexManager(partitionIndexManager)
                .esConfig(jobConfig.getEsConfig())
                .flushInterval(jobConfig.getEsFlushInterval())
                .batchSize(jobConfig.getEsBatchSize())
                .build();
    }
    
    /**
     * 主方法
     */
    /**
     * Entry point. Prints usage and exits when invoked without arguments
     * (the original never called {@code printUsage()} at all).
     *
     * @param args command-line options; see {@code printUsage()}
     * @throws Exception if job construction or execution fails
     */
    public static void main(String[] args) throws Exception {
        if (args.length == 0) {
            printUsage();
            return;
        }

        // Parse command-line arguments.
        JobConfig jobConfig = parseArguments(args);

        // Create and run the job.
        KafkaToElasticsearchTimePartitionJob job = new KafkaToElasticsearchTimePartitionJob(jobConfig);
        job.run();
    }
    
    /**
     * 解析命令行参数
     */
    private static JobConfig parseArguments(String[] args) {
        JobConfig.Builder builder = JobConfig.builder();
        
        for (int i = 0; i < args.length; i += 2) {
            if (i + 1 >= args.length) {
                throw new IllegalArgumentException("Missing value for argument: " + args[i]);
            }
            
            String key = args[i];
            String value = args[i + 1];
            
            switch (key) {
                case "--kafka-brokers":
                    builder.kafkaBootstrapServers(value);
                    break;
                case "--kafka-topics":
                    builder.kafkaTopics(Arrays.asList(value.split(",")));
                    break;
                case "--kafka-group-id":
                    builder.kafkaGroupId(value);
                    break;
                case "--kafka-offset-reset":
                    builder.kafkaOffsetReset(value);
                    break;
                case "--es-hosts":
                    builder.esHosts(value);
                    break;
                case "--es-username":
                    builder.esUsername(value);
                    break;
                case "--es-password":
                    builder.esPassword(value);
                    break;
                case "--index-name":
                    builder.indexName(value);
                    break;
                case "--document-id-field":
                    builder.documentIdField(value);
                    break;
                case "--time-fields":
                    builder.timeFields(value.split(","));
                    break;
                case "--timezone":
                    builder.defaultTimeZone(value);
                    break;
                case "--kafka-parallelism":
                    builder.kafkaParallelism(Integer.parseInt(value));
                    break;
                case "--processing-parallelism":
                    builder.processingParallelism(Integer.parseInt(value));
                    break;
                case "--es-parallelism":
                    builder.esParallelism(Integer.parseInt(value));
                    break;
                case "--es-batch-size":
                    builder.esBatchSize(Integer.parseInt(value));
                    break;
                case "--es-flush-interval":
                    builder.esFlushInterval(Long.parseLong(value));
                    break;
                case "--checkpoint-interval":
                    builder.checkpointInterval(Long.parseLong(value));
                    break;
                case "--config-file":
                    builder = loadConfigFromFile(value);
                    break;
                default:
                    LOG.warn("Unknown argument: {}", key);
            }
        }
        
        return builder.build();
    }
    
    /**
     * 从配置文件加载配置
     */
    /**
     * Loads job configuration from a JSON file on the classpath.
     *
     * @param configFile classpath-relative file name (a leading '/' is added)
     * @return a builder populated from the file's key/value map
     * @throws RuntimeException if the resource is missing or unreadable
     */
    private static JobConfig.Builder loadConfigFromFile(String configFile) {
        // try-with-resources closes the stream; the original leaked it and
        // would NPE with an opaque message when the resource was absent.
        try (java.io.InputStream in =
                 KafkaToElasticsearchTimePartitionJob.class.getResourceAsStream("/" + configFile)) {
            if (in == null) {
                throw new IOException("Config file not found on classpath: /" + configFile);
            }

            ObjectMapper mapper = new ObjectMapper();
            @SuppressWarnings("unchecked")
            Map<String, Object> config = mapper.readValue(in, Map.class);

            return JobConfig.fromMap(config);
        } catch (IOException e) {
            throw new RuntimeException("Failed to load config file: " + configFile, e);
        }
    }
    
    /**
     * 打印用法说明
     */
    /**
     * Prints command-line usage. The original example concatenated the jar
     * line and the class name with no separator, producing
     * {@code ...jar \com.flink...} on one line; it is now split correctly.
     */
    private static void printUsage() {
        System.out.println("Usage: java -cp <classpath> " + KafkaToElasticsearchTimePartitionJob.class.getName());
        System.out.println("Options:");
        System.out.println("  --kafka-brokers <brokers>           Kafka bootstrap servers");
        System.out.println("  --kafka-topics <topics>             Kafka topics (comma-separated)");
        System.out.println("  --kafka-group-id <group-id>         Kafka consumer group ID");
        System.out.println("  --kafka-offset-reset <reset>        Kafka offset reset strategy");
        System.out.println("  --es-hosts <hosts>                  Elasticsearch hosts");
        System.out.println("  --es-username <username>            Elasticsearch username");
        System.out.println("  --es-password <password>            Elasticsearch password");
        System.out.println("  --index-name <name>                 Base index name");
        System.out.println("  --document-id-field <field>         Document ID field");
        System.out.println("  --time-fields <fields>              Time fields (comma-separated)");
        System.out.println("  --timezone <timezone>               Default timezone");
        System.out.println("  --kafka-parallelism <num>           Kafka source parallelism");
        System.out.println("  --processing-parallelism <num>      Processing parallelism");
        System.out.println("  --es-parallelism <num>              ES sink parallelism");
        System.out.println("  --es-batch-size <size>              ES batch size");
        System.out.println("  --es-flush-interval <ms>            ES flush interval");
        System.out.println("  --checkpoint-interval <ms>          Checkpoint interval");
        System.out.println("  --config-file <file>                Configuration file");
        System.out.println("");
        System.out.println("Example:");
        System.out.println("  java -cp flink-hbase-1.0-SNAPSHOT.jar \\");
        System.out.println("    " + KafkaToElasticsearchTimePartitionJob.class.getName() + " \\");
        System.out.println("    --kafka-brokers localhost:9092 \\");
        System.out.println("    --kafka-topics user_events \\");
        System.out.println("    --kafka-group-id es-consumer \\");
        System.out.println("    --es-hosts localhost:9200 \\");
        System.out.println("    --index-name user_events \\");
        System.out.println("    --time-fields timestamp,create_time \\");
        System.out.println("    --kafka-parallelism 4 \\");
        System.out.println("    --es-parallelism 2");
    }
    
    /**
     * 获取优化建议
     */
    /**
     * Prints tuning recommendations (parallelism, batching, stability) to
     * stdout. Output text is identical to the original; the repeated
     * println calls are routed through a single varargs helper.
     */
    public static class OptimizationRecommendations {

        /** Prints each line in order, including empty separator lines. */
        private static void printLines(String... lines) {
            for (String line : lines) {
                System.out.println(line);
            }
        }

        public static void printKafkaOptimizations() {
            printLines(
                "=== Kafka 并行度优化建议 ===",
                "1. Kafka 消费者并行度设置:",
                "   - 建议设置为 Topic 分区数，最大不超过分区数",
                "   - 例如：Topic 有 8 个分区，设置 --kafka-parallelism 8",
                "",
                "2. 获取 Topic 分区信息:",
                "   kafka-topics --bootstrap-server localhost:9092 --describe --topic <topic-name>",
                "",
                "3. 消费者组优化:",
                "   - 每个分区只能被一个消费者实例消费",
                "   - 消费者数量 = 分区数量 (最优)",
                "   - 消费者数量 > 分区数量 (浪费资源)",
                "");
        }

        public static void printProcessingOptimizations() {
            printLines(
                "=== 处理并行度优化建议 ===",
                "1. 处理并行度设置:",
                "   - 建议设置为 CPU 核心数的 2-4 倍",
                "   - 例如：8 核 CPU，设置 --processing-parallelism 16-32",
                "",
                "2. 时间提取优化:",
                "   - 优先使用时间戳字段而非字符串解析",
                "   - 设置合适的时区避免时间偏移",
                "");
        }

        public static void printElasticsearchOptimizations() {
            printLines(
                "=== Elasticsearch 优化建议 ===",
                "1. ES 并行度设置:",
                "   - 建议设置为 ES 节点数或稍少",
                "   - 例如：3 节点 ES 集群，设置 --es-parallelism 3",
                "",
                "2. 批量写入优化:",
                "   - 批量大小: 100-1000 条记录",
                "   - 刷新间隔: 5-30 秒",
                "   - 索引刷新间隔: 30s-5m",
                "",
                "3. 索引分片优化:",
                "   - 每个分片 20-40GB",
                "   - 分片数 = 数据量(GB) / 30",
                "   - 副本数根据可用性需求设置",
                "");
        }

        public static void printStabilityRecommendations() {
            printLines(
                "=== 稳定性和健壮性建议 ===",
                "1. Checkpoint 配置:",
                "   - 启用 Checkpoint: --checkpoint-interval 60000",
                "   - 使用 RocksDB 状态后端处理大状态",
                "",
                "2. 重启策略:",
                "   - 固定延迟重启: 3 次重试，间隔 10 秒",
                "   - 失败率重启: 1 小时内最多 3 次重试",
                "",
                "3. 资源配置:",
                "   - JVM 堆内存: 物理内存的 70-80%",
                "   - 网络缓冲区: 128MB - 1GB",
                "   - 任务槽数: CPU 核心数",
                "",
                "4. 监控指标:",
                "   - Kafka 消费延迟 (Consumer Lag)",
                "   - ES 索引速率和错误率",
                "   - Flink 背压和 Checkpoint 时间",
                "");
        }
    }
} 