package com.lm.flink.component;

import com.lm.flink.entry.*;
import com.lm.flink.log.*;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.RichFlatMapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.util.Collector;
import org.apache.kafka.clients.producer.ProducerConfig;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.JedisPool;
import redis.clients.jedis.JedisPoolConfig;

import java.time.Duration;
import java.util.Properties;

public class KafkaToRedisAnalysisJob {

    /**
     * Entry point: reads raw log lines from Kafka, parses them into {@code LogEvent}s,
     * computes three aggregates over 10-second tumbling processing-time windows
     * (count per log level, count per service, total log volume) and writes each
     * result stream to Redis through a dedicated sink.
     *
     * @param args command-line arguments (unused)
     * @throws Exception if job construction or execution fails
     */
    public static void main(String[] args) throws Exception {
        // Set up the streaming execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(2); // default parallelism for all operators unless overridden

        // Kafka source: consume "log-topic" from the earliest offset, treating each
        // record value as a plain UTF-8 string.
        KafkaSource<String> kafkaSource = KafkaSource.<String>builder()
                .setBootstrapServers("10.2.0.230:9092")
                .setTopics("log-topic")
                .setGroupId("flink-group")
                .setStartingOffsets(OffsetsInitializer.earliest())
                .setValueOnlyDeserializer(new SimpleStringSchema())
                .build();

        // NOTE(review): every window below is processing-time, so these event-time
        // watermarks are never consulted; WatermarkStrategy.noWatermarks() would state
        // that intent directly — confirm no event-time windows are planned before switching.
        DataStream<String> kafkaStream = env.fromSource(
                kafkaSource,
                WatermarkStrategy.forBoundedOutOfOrderness(Duration.ofSeconds(5)),
                "Kafka Source"
        );

        // Parse raw lines into structured LogEvent records; LogParser is a flatMap,
        // so it may emit zero events for an unparseable line.
        DataStream<LogEvent> parsedStream = kafkaStream
                .flatMap(new LogParser())
                .name("Log Parser");

        // Real-time stat: count per log level over a 10s tumbling window.
        SingleOutputStreamOperator<LogLevelCount> levelCountStream = parsedStream
                .keyBy(LogEvent::getLevel)
                .window(TumblingProcessingTimeWindows.of(Time.seconds(10)))
                .aggregate(new LogLevelCountAgg(), new LogLevelWindowResult())
                .name("Level Counter");

        // Real-time stat: count per service over the same window size.
        // 'var' keeps the aggregate's element type instead of the raw
        // SingleOutputStreamOperator the original declared.
        var serviceCountStream = parsedStream
                .keyBy(LogEvent::getService)
                .window(TumblingProcessingTimeWindows.of(Time.seconds(10)))
                .aggregate(new ServiceCounter())
                .name("Service Counter");

        // Real-time stat: total log volume per 10s window. windowAll is non-keyed
        // and therefore runs with parallelism 1 regardless of env.setParallelism.
        var totalCountStream = parsedStream
                .windowAll(TumblingProcessingTimeWindows.of(Time.seconds(10)))
                .aggregate(new TotalCounter())
                .name("Total Counter");

        // Write each aggregate stream to Redis — exactly one sink per stream.
        // (The original attached RedisLevelCountSink to levelCountStream twice,
        // which wrote every per-level count to Redis two times.)
        levelCountStream.addSink(new RedisLevelCountSink())
                .name("Redis Level Sink");

        serviceCountStream.addSink(new RedisServiceCountSink())
                .name("Redis Service Sink");

        totalCountStream.addSink(new RedisTotalCountSink())
                .name("Redis Total Sink");

        // Submit the job graph for execution.
        env.execute("Kafka Log Analysis to Redis");
    }
}
