package com.cn.daimajiangxin.flink.transformation;

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.AggregateFunction;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.typeinfo.TypeHint;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.connector.kafka.source.reader.deserializer.KafkaRecordDeserializationSchema;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.windowing.ProcessWindowFunction;
import org.apache.flink.streaming.api.windowing.assigners.GlobalWindows;
import org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.api.windowing.triggers.CountTrigger;
import org.apache.flink.streaming.api.windowing.windows.GlobalWindow;
import org.apache.flink.streaming.api.windowing.windows.TimeWindow;
import org.apache.flink.util.Collector;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.time.Duration;
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeParseException;
import java.util.Date;

/**
 * Flink streaming job that consumes pipe-delimited application logs
 * ("timestamp|level|message") from Kafka and computes three windowed statistics:
 * per-level counts, ERROR counts, and per-hour log volume. Results are printed
 * to stdout via named print sinks.
 */
public class LogAnalysisDemo {
    private static final Logger LOG = LoggerFactory.getLogger(LogAnalysisDemo.class);

    // DateTimeFormatter is immutable and thread-safe. The previous shared static
    // SimpleDateFormat was NOT thread-safe, and with parallelism 4 the parsing
    // flatMap runs in concurrent subtasks, so parsed timestamps could be silently
    // corrupted under load.
    private static final DateTimeFormatter LOG_TS_FORMAT =
            DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss");
    private static final DateTimeFormatter HOUR_KEY_FORMAT =
            DateTimeFormatter.ofPattern("yyyy-MM-dd HH");

    /**
     * Reusable windowed count aggregate; replaces three identical anonymous
     * AggregateFunction implementations that only counted elements.
     *
     * @param <T> element type being counted (ignored by the accumulator)
     */
    private static final class CountAggregate<T> implements AggregateFunction<T, Long, Long> {
        @Override
        public Long createAccumulator() {
            return 0L;
        }

        @Override
        public Long add(T value, Long accumulator) {
            return accumulator + 1;
        }

        @Override
        public Long getResult(Long accumulator) {
            return accumulator;
        }

        @Override
        public Long merge(Long a, Long b) {
            return a + b;
        }
    }

    public static void main(String[] args) throws Exception {
        // 1. Create the Flink streaming execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Enable checkpointing
        env.enableCheckpointing(10000); // checkpoint every 10 seconds
        env.getCheckpointConfig().setCheckpointTimeout(60000); // checkpoint timeout: 60 seconds
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(5000); // minimum pause between checkpoints
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(1); // at most one concurrent checkpoint
        // Set global parallelism
        env.setParallelism(4);

        // 2. Kafka connection parameters
        String kafkaBootstrapServers = "192.168.0.199:9092";
        String topic = "app_logs";
        String consumerGroup = "flink-log-analysis";

        // 3. Define the Kafka source
        KafkaSource<String> kafkaSource = KafkaSource.<String>builder()
                .setBootstrapServers(kafkaBootstrapServers)
                .setTopics(topic)
                .setGroupId(consumerGroup)
                .setDeserializer(new KafkaRecordDeserializationSchema<String>() {
                    @Override
                    public void deserialize(ConsumerRecord<byte[], byte[]> record, Collector<String> out)
                            throws IOException {
                        // Tombstone records carry a null value; skip them instead of
                        // throwing an NPE that would fail the whole source task.
                        byte[] value = record.value();
                        if (value != null) {
                            out.collect(new String(value, StandardCharsets.UTF_8));
                        }
                    }

                    @Override
                    public TypeInformation<String> getProducedType() {
                        return TypeInformation.of(String.class);
                    }
                })
                .setStartingOffsets(OffsetsInitializer.earliest())
                // Kafka client properties tuned for connection stability
                .setProperty("enable.auto.commit", "false") // offsets are committed by Flink checkpoints
                .setProperty("session.timeout.ms", "45000")
                .setProperty("max.poll.interval.ms", "300000")
                .setProperty("heartbeat.interval.ms", "10000")
                .setProperty("retry.backoff.ms", "1000")
                .setProperty("reconnect.backoff.max.ms", "10000")
                .setProperty("reconnect.backoff.ms", "1000")
                .build();

        // 4. Read raw log lines from Kafka
        DataStream<String> logStream = env.fromSource(
                kafkaSource,
                WatermarkStrategy.noWatermarks(),
                "Kafka Log Source");

        // 5. Parse "timestamp|level|message" lines into LogEntry objects; malformed
        //    records are logged and dropped rather than failing the job.
        DataStream<LogEntry> parsedLogStream = logStream.flatMap(new FlatMapFunction<String, LogEntry>() {
            @Override
            public void flatMap(String value, Collector<LogEntry> out) throws Exception {
                try {
                    // limit=3 keeps any '|' characters inside the message intact
                    String[] parts = value.split("\\|", 3);
                    if (parts.length == 3) {
                        // Thread-safe parse; convert to java.util.Date because the
                        // LogEntry constructor expects Date.
                        Date timestamp = Date.from(
                                LocalDateTime.parse(parts[0], LOG_TS_FORMAT)
                                        .atZone(ZoneId.systemDefault())
                                        .toInstant());
                        String logLevel = parts[1];
                        String message = parts[2];
                        LogEntry entry = new LogEntry(timestamp, logLevel, message);
                        // debug, not info: one line per record floods logs at volume
                        LOG.debug("Parsed log entry: {}", entry);
                        out.collect(entry);
                    } else {
                        LOG.warn("Failed to parse log entry (wrong part count): {}", value);
                    }
                } catch (DateTimeParseException e) {
                    LOG.warn("Failed to parse log entry: {}", value, e);
                } catch (Exception e) {
                    LOG.error("Unexpected error while parsing log entry: {}", value, e);
                }
            }
        });

        // 6. Log-level distribution: count per level over 1-minute processing-time windows
        KeyedStream<LogEntry, String> levelKeyedStream = parsedLogStream.keyBy(entry -> entry.getLogLevel());
        DataStream<Tuple2<String, Long>> levelCountStream = levelKeyedStream
                .window(TumblingProcessingTimeWindows.of(Duration.ofMinutes(1)))
                .aggregate(
                        new CountAggregate<LogEntry>(),
                        new ProcessWindowFunction<Long, Tuple2<String, Long>, String, TimeWindow>() {
                            @Override
                            public void process(String level, Context context, Iterable<Long> elements,
                                    Collector<Tuple2<String, Long>> out) throws Exception {
                                // the aggregate emits exactly one pre-reduced count per window
                                long count = elements.iterator().next();
                                out.collect(new Tuple2<>(level, count));
                            }
                        });
        levelCountStream.print("LogLevelCount");

        // 7. ERROR-log count over 1-minute windows
        DataStream<LogEntry> errorLogStream = parsedLogStream.filter(entry -> entry.getLogLevel().equals("ERROR"));
        KeyedStream<LogEntry, String> errorKeyedStream = errorLogStream.keyBy(entry -> "ERROR"); // single key: all errors share one window
        DataStream<Tuple2<String, Long>> errorCountStream = errorKeyedStream
                .window(TumblingProcessingTimeWindows.of(Duration.ofMinutes(1)))
                .aggregate(
                        new CountAggregate<LogEntry>(),
                        new ProcessWindowFunction<Long, Tuple2<String, Long>, String, TimeWindow>() {
                            @Override
                            public void process(String key, Context context, Iterable<Long> elements,
                                    Collector<Tuple2<String, Long>> out) {
                                long count = elements.iterator().next();
                                out.collect(new Tuple2<>("ERROR_COUNT", count));
                            }
                        });

        errorCountStream.print("ErrorCount");

        // 8. Hourly log volume, keyed by the event's own hour bucket.
        //    NOTE(review): the key is event-time based while the window is
        //    processing-time based; late-arriving records land in the current
        //    processing window under their original hour key — confirm intended.
        DataStream<Tuple2<String, LogEntry>> hourlyLogStream = parsedLogStream
                .map(new MapFunction<LogEntry, Tuple2<String, LogEntry>>() {
                    @Override
                    public Tuple2<String, LogEntry> map(LogEntry entry) throws Exception {
                        // thread-safe shared formatter instead of a per-record SimpleDateFormat
                        String hourKey = HOUR_KEY_FORMAT.format(
                                entry.getTimestamp().toInstant().atZone(ZoneId.systemDefault()));
                        return new Tuple2<>(hourKey, entry);
                    }
                }).returns(new TypeHint<Tuple2<String, LogEntry>>() {
                });

        KeyedStream<Tuple2<String, LogEntry>, String> hourlyKeyedStream = hourlyLogStream.keyBy(tuple -> tuple.f0);
        DataStream<Tuple3<String, Long, Long>> hourlyCountStream = hourlyKeyedStream
                .window(TumblingProcessingTimeWindows.of(Duration.ofHours(1)))
                .aggregate(
                        new CountAggregate<Tuple2<String, LogEntry>>(),
                        new ProcessWindowFunction<Long, Tuple3<String, Long, Long>, String, TimeWindow>() {
                            @Override
                            public void process(String hour, Context context, Iterable<Long> elements,
                                    Collector<Tuple3<String, Long, Long>> out) {
                                long count = elements.iterator().next();
                                out.collect(new Tuple3<>(hour, count, context.window().getEnd()));
                            }
                        });

        hourlyCountStream.print("HourlyLogCount");

        // 9. Launch the job (blocks until the streaming job terminates)
        env.execute("Log Analysis Demo");
    }

}