package com.ruoyi.collector.service;

import com.alibaba.fastjson.JSONObject;
import org.apache.flink.api.common.RuntimeExecutionMode;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.shaded.hadoop2.com.google.gson.Gson;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.util.Collector;
import com.ruoyi.common.utils.DateUtils;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.graylog2.syslog4j.server.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.CommandLineRunner;
import org.springframework.stereotype.Component;
import org.springframework.stereotype.Service;

import java.net.SocketAddress;
import java.text.SimpleDateFormat;
import java.util.*;

/**
 * @Description: Flink streaming job that consumes String records from the Kafka topic
 *               "parse_noder" and parses each record as a fastjson JSONObject.
 * @CreateTime: 2022-12-31  15:54
 * @Author: chaozi
 * @Version: 1.0
 */

@Component
public class FlinkListener {

    private static final Logger log = LoggerFactory.getLogger(FlinkListener.class);

    /**
     * Entry point: builds a Flink streaming job that reads String records from the
     * Kafka topic "parse_noder", attempts to parse each record as JSON, and prints
     * the resulting stream.
     *
     * @param args unused command-line arguments
     * @throws Exception if the Flink job fails to execute
     */
    public static void main(String[] args) throws Exception {
        // Environment: AUTOMATIC lets Flink choose batch/streaming execution mode.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setRuntimeMode(RuntimeExecutionMode.AUTOMATIC);

        Properties props = new Properties();
        // Kafka broker list
        props.setProperty("bootstrap.servers", "10.211.3.99:9092,10.211.3.117:9092,10.211.3.181:9092");
        // Consumer group id
        props.setProperty("group.id", "test-consumer-group");
        // latest: resume from committed offset if one exists, otherwise start from the newest messages
        // earliest: resume from committed offset if one exists, otherwise start from the oldest messages
        props.setProperty("auto.offset.reset", "latest");
        // Background thread re-checks Kafka partitions every 5s (dynamic partition discovery)
        props.setProperty("flink.partition-discovery.interval-millis", "5000");
        // Auto-commit offsets to Kafka (with checkpointing, offsets are also stored in checkpoints)
        props.setProperty("enable.auto.commit", "true");
        // Auto-commit interval in milliseconds
        props.setProperty("auto.commit.interval.ms", "2000");

        // Source: consume raw String records from the "parse_noder" topic.
        FlinkKafkaConsumer<String> source =
                new FlinkKafkaConsumer<>("parse_noder", new SimpleStringSchema(), props);
        DataStreamSource<String> ds = env.addSource(source);

        // Transformation: parse each record as JSON.
        // NOTE(review): this flatMap never calls collector.collect(...), so the
        // downstream Tuple3 stream is always empty — confirm which JSON fields
        // were meant to be extracted and emitted here.
        SingleOutputStreamOperator<Tuple3<String, String, String>> result =
                ds.flatMap(new FlatMapFunction<String, Tuple3<String, String, String>>() {
                    @Override
                    public void flatMap(String s, Collector<Tuple3<String, String, String>> collector) {
                        try {
                            // parseObject returns JSONObject directly — no cast needed.
                            JSONObject json = JSONObject.parseObject(s);
                            System.out.println(json);
                        } catch (Exception e) {
                            // Malformed records are intentionally skipped, but log them
                            // instead of silently swallowing the failure.
                            log.warn("Dropping record that failed JSON parsing: {}", s, e);
                        }
                    }
                });

        // Sink: print the (currently empty — see note above) tuple stream and run the job.
        result.print();
        env.execute("FlinkListener");
    }
}
