package com.steve.bigdata.flinkmonitor;

import com.steve.bigdata.flinkmonitor.assist.EventParser;
import com.steve.bigdata.flinkmonitor.function.EventAggregator;
import com.steve.bigdata.flinkmonitor.function.MetricsMapper;
import com.steve.bigdata.flinkmonitor.model.Event;
import com.steve.bigdata.flinkmonitor.model.WindowResult;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.connector.kafka.source.reader.deserializer.KafkaRecordDeserializationSchema;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.assigners.SlidingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;

import java.time.Duration;

/**
 * Entry point for the Kafka sliding-window monitoring job.
 *
 * <p>Reads raw string events from a Kafka topic, parses them into {@code Event}
 * objects, assigns event-time watermarks, aggregates per-user activity over a
 * 30-minute sliding window (5-minute slide), and prints the results.
 *
 * @author STEVE
 * @since 2025-07-10
 */
public class MainJob {

    // Kafka connection settings.
    private static final String KAFKA_BROKERS = "kafka-broker:9092";
    private static final String INPUT_TOPIC = "input-topic";
    private static final String CONSUMER_GROUP_ID = "sliding-window-monitor";

    public static void main(String[] args) throws Exception {
        // 1. Create the stream execution environment.
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Checkpoint every 5 seconds so state is recoverable and Kafka offsets
        // are committed on checkpoint completion (recommended for production).
        env.enableCheckpointing(5000);
        // Sized to match the Kafka topic's partition count.
        env.setParallelism(4);

        // 2. Build the Kafka source.
        KafkaSource<String> kafkaSource = createKafkaSource();

        // 3. Build the processing pipeline.
        // 3.1 Raw string stream with no watermarks yet: timestamps live inside
        //     the payload, so watermarks are assigned after parsing.
        DataStreamSource<String> kafkaStrings =
                env.fromSource(kafkaSource, WatermarkStrategy.noWatermarks(), "Kafka Source");

        // 3.2 Parse records, then assign event-time timestamps and watermarks:
        //     tolerate up to 5 s of out-of-order events, and mark a source split
        //     idle after 1 minute so an inactive partition does not stall the
        //     overall watermark.
        DataStream<Event> events = kafkaStrings
                .map(new EventParser())
                .assignTimestampsAndWatermarks(
                        WatermarkStrategy.<Event>forBoundedOutOfOrderness(Duration.ofSeconds(5))
                                .withIdleness(Duration.ofMinutes(1))
                                .withTimestampAssigner((event, ts) -> event.getEventTime()));

        DataStream<WindowResult> results = events
                // Partition the stream by user id.
                .keyBy(Event::getUserId)
                // 30-minute event-time window, sliding every 5 minutes.
                .window(SlidingEventTimeWindows.of(
                        Time.minutes(30),   // window size
                        Time.minutes(5)))   // slide interval
                // Late events arriving within 10 s of the watermark re-fire the window.
                .allowedLateness(Time.seconds(10))
                // Incremental per-window aggregation.
                .aggregate(new EventAggregator())
                // Attach monitoring metrics to each window result.
                .map(new MetricsMapper());

        // 4. Sink: console output for now; prefer a Kafka or database sink in
        //    production. Parallelism 1 keeps the console output readable.
        results.print().setParallelism(1);

        // 5. Launch the job.
        env.execute("Kafka Sliding Window Monitoring");
    }

    /**
     * Builds the {@link KafkaSource} for the input topic.
     *
     * <p>With the new {@code KafkaSource} API, {@code setStartingOffsets} alone
     * determines where consumption begins; a separate {@code auto.offset.reset}
     * consumer property is overridden by the initializer and is therefore not
     * set here (the original code set it to {@code latest}, contradicting the
     * {@code earliest()} initializer — that dead, misleading property has been
     * removed). To resume from committed offsets in production, switch to
     * {@code OffsetsInitializer.committedOffsets(OffsetResetStrategy.EARLIEST)}.
     */
    private static KafkaSource<String> createKafkaSource() {
        return KafkaSource.<String>builder()
                .setBootstrapServers(KAFKA_BROKERS)
                .setTopics(INPUT_TOPIC)
                .setGroupId(CONSUMER_GROUP_ID)
                // Start from the earliest offset (consider committedOffsets/latest
                // for production).
                .setStartingOffsets(OffsetsInitializer.earliest())
                // Records carry plain string values; keys are ignored.
                .setDeserializer(KafkaRecordDeserializationSchema.valueOnly(new SimpleStringSchema()))
                // Discover newly added partitions every 30 seconds.
                .setProperty("partition.discovery.interval.ms", "30000")
                // Commit consumed offsets back to Kafka on each successful checkpoint.
                .setProperty("commit.offsets.on.checkpoint", "true")
                .build();
    }

}
