package com.cn.daimajiangxin.flink.sink;

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.connector.elasticsearch.sink.Elasticsearch7SinkBuilder;
import org.apache.flink.connector.elasticsearch.sink.ElasticsearchSink;
import org.apache.flink.connector.jdbc.JdbcConnectionOptions;
import org.apache.flink.connector.jdbc.JdbcExecutionOptions;
import org.apache.flink.connector.jdbc.JdbcStatementBuilder;
import org.apache.flink.connector.jdbc.core.datastream.sink.JdbcSink;
import org.apache.flink.connector.jdbc.datasource.statements.SimpleJdbcQueryStatement;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema;
import org.apache.flink.connector.kafka.sink.KafkaSink;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.connector.kafka.source.reader.deserializer.KafkaRecordDeserializationSchema;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;
import org.apache.http.HttpHost;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.Requests;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.time.LocalDateTime;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

/**
 * Flink streaming job that reads pipe-delimited log lines from Kafka and fans them out
 * to three sinks:
 * <ul>
 *   <li>Kafka: error-level logs re-published to {@code error-logs-topic}</li>
 *   <li>Elasticsearch: all parsed logs indexed into {@code logs_index}</li>
 *   <li>JDBC (MySQL): per-source error counts upserted into {@code error_log_stats}</li>
 * </ul>
 * Expected input format: {@code timestamp|logLevel|source|message}.
 */
public class MultiSinkPipeline {
        public static void main(String[] args) throws Exception {
                // 1. Create the execution environment and enable checkpointing so the
                // Kafka source commits offsets and the sinks get at-least-once guarantees.
                StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
                env.enableCheckpointing(5000);

                // 2. Kafka source: raw UTF-8 log lines from logs-input-topic.
                KafkaSource<String> source = KafkaSource.<String>builder()
                                .setBootstrapServers("192.168.0.199:9092")
                                .setTopics("logs-input-topic")
                                .setGroupId("flink-group")
                                .setStartingOffsets(OffsetsInitializer.earliest())
                                .setDeserializer(new KafkaRecordDeserializationSchema<String>() {
                                        @Override
                                        public void deserialize(ConsumerRecord<byte[], byte[]> record,
                                                        Collector<String> out) throws IOException {
                                                // Decode explicitly as UTF-8; the Kafka sink below must
                                                // encode with the same charset.
                                                String value = new String(record.value(), StandardCharsets.UTF_8);
                                                out.collect(value);
                                        }

                                        @Override
                                        public TypeInformation<String> getProducedType() {
                                                return TypeInformation.of(String.class);
                                        }
                                })
                                // Kafka client tuning for stability.
                                .setProperty("enable.auto.commit", "false") // offsets committed by Flink checkpoints
                                .setProperty("session.timeout.ms", "45000")
                                .setProperty("max.poll.interval.ms", "300000")
                                .setProperty("heartbeat.interval.ms", "10000")
                                .setProperty("retry.backoff.ms", "1000")
                                .setProperty("reconnect.backoff.max.ms", "10000")
                                .setProperty("reconnect.backoff.ms", "1000")
                                .build();

                // 3. Ingest raw lines (no event-time semantics needed, hence no watermarks).
                DataStream<String> kafkaStream = env.fromSource(source, WatermarkStrategy.noWatermarks(),
                                "Kafka Source");

                // Parse "timestamp|logLevel|source|message" lines into LogEntry objects.
                // Malformed records (fewer than 4 fields) are dropped by the filter below
                // instead of killing the job with ArrayIndexOutOfBoundsException.
                DataStream<LogEntry> logStream = kafkaStream
                                .filter(line -> line != null && line.split("\\|", -1).length >= 4)
                                .name("Malformed Line Filter")
                                .map(line -> {
                                        String[] parts = line.split("\\|");
                                        return new LogEntry(parts[0], parts[1], parts[2], parts[3]);
                                })
                                .name("Log Parser");

                // 4. Keep only ERROR-level entries for the Kafka sink and the JDBC stats.
                DataStream<LogEntry> errorLogStream = logStream
                                .filter(log -> "ERROR".equals(log.getLogLevel()))
                                .name("Error Log Filter");

                // 5. Kafka sink: republish error logs to a dedicated topic.
                Properties props = new Properties();
                props.setProperty("bootstrap.servers", "192.168.0.199:9092");

                KafkaSink<LogEntry> kafkaSink = KafkaSink.<LogEntry>builder()
                                .setKafkaProducerConfig(props)
                                .setRecordSerializer(KafkaRecordSerializationSchema.<LogEntry>builder()
                                                .setTopic("error-logs-topic")
                                                // Encode explicitly as UTF-8 to mirror the source-side decode;
                                                // getBytes() without a charset is platform-dependent.
                                                .setValueSerializationSchema(
                                                                element -> element.toString().getBytes(StandardCharsets.UTF_8))
                                                .build())
                                .build();

                errorLogStream.sinkTo(kafkaSink).name("Error Logs Kafka Sink");

                // 6. Elasticsearch sink: index every parsed log into logs_index.
                HttpHost httpHost = new HttpHost("192.168.0.199", 9200, "http");

                ElasticsearchSink<LogEntry> esSink = new Elasticsearch7SinkBuilder<LogEntry>()
                                .setBulkFlushMaxActions(10) // flush after this many buffered actions
                                .setBulkFlushInterval(5000) // ...or after this many milliseconds
                                .setHosts(httpHost)
                                .setConnectionRequestTimeout(60000) // ms
                                .setConnectionTimeout(60000) // ms
                                .setSocketTimeout(60000) // ms
                                .setEmitter((element, context, indexer) -> {
                                        Map<String, Object> json = new HashMap<>();
                                        json.put("timestamp", element.getTimestamp());
                                        json.put("logLevel", element.getLogLevel());
                                        json.put("source", element.getSource());
                                        json.put("message", element.getMessage());
                                        IndexRequest request = Requests.indexRequest()
                                                        .index("logs_index")
                                                        .source(json);
                                        indexer.add(request);
                                })
                                .build();

                logStream.sinkTo(esSink).name("Elasticsearch Sink");

                // 7. JDBC sink: running error count per source, upserted into MySQL.
                // keyBy + sum emits an updated cumulative count for every incoming error.
                DataStream<LogStats> statsStream = errorLogStream
                                .map(log -> new LogStats(log.getSource(), 1))
                                .keyBy(LogStats::getSource)
                                .sum("count")
                                .name("Error Log Stats");
                JdbcExecutionOptions jdbcExecutionOptions = JdbcExecutionOptions.builder()
                                .withBatchSize(1000)
                                .withBatchIntervalMs(200)
                                .withMaxRetries(5)
                                .build();
                // NOTE(review): credentials are hardcoded; move to configuration/secret
                // management before deploying outside a dev environment.
                JdbcConnectionOptions connectionOptions = new JdbcConnectionOptions.JdbcConnectionOptionsBuilder()
                                .withUrl("jdbc:mysql://192.168.0.199:3306/test")
                                .withDriverName("com.mysql.cj.jdbc.Driver")
                                .withUsername("root")
                                .withPassword("1qaz@WSX")
                                .build();
                // Upsert keyed on source; VALUES() picks up the freshly inserted values.
                String insertSql = "INSERT INTO error_log_stats (source, count, last_updated) VALUES (?, ?, ?) " +
                                "ON DUPLICATE KEY UPDATE count = count + VALUES(count), last_updated = VALUES(last_updated)";
                JdbcStatementBuilder<LogStats> statementBuilder = (statement, stats) -> {
                        statement.setString(1, stats.getSource());
                        statement.setLong(2, stats.getCount());
                        statement.setTimestamp(3, java.sql.Timestamp.valueOf(LocalDateTime.now()));
                };
                JdbcSink<LogStats> jdbcSink = JdbcSink.<LogStats>builder()
                                .withQueryStatement(new SimpleJdbcQueryStatement<LogStats>(insertSql, statementBuilder))
                                .withExecutionOptions(jdbcExecutionOptions)
                                .buildAtLeastOnce(connectionOptions);
                statsStream.sinkTo(jdbcSink).name("JDBC Sink");

                // 8. Submit the job.
                env.execute("Multi-Sink Data Pipeline");
        }

}