package com.rwind;

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.ReduceFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.utils.MultipleParameterTool;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.metrics.MetricGroup;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.operators.StreamingRuntimeContext;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumerBase;
import org.apache.flink.streaming.connectors.kafka.config.OffsetCommitMode;
import org.apache.flink.streaming.connectors.kafka.internals.AbstractFetcher;
import org.apache.flink.streaming.connectors.kafka.internals.AbstractPartitionDiscoverer;
import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition;
import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicsDescriptor;
import org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptions;
import org.apache.flink.util.Collector;
import org.apache.flink.util.SerializedValue;

import java.util.Collection;
import java.util.Map;
import java.util.Properties;

public class KafkaTest {

    /*
     * Minimal Flink streaming job that reads string records from a Kafka topic
     * and prints every record to stdout (visible in the task executor's
     * log/flink-*-taskexecutor-*.out file).
     *
     * Submit with, e.g.:
     *   flink run -c com.rwind.KafkaTest /home/rwind/task/wordcount-1.0-SNAPSHOT.jar
     */

    /** Kafka broker address shared by the consumer configuration. */
    private static final String BOOTSTRAP_SERVERS = "192.168.99.55:9092";

    /** Kafka topic this job consumes from. */
    private static final String TOPIC = "coupon_consume";

    /**
     * Entry point: builds the streaming environment, attaches a Kafka consumer
     * on {@link #TOPIC}, prints each record, and runs the job.
     *
     * @param args command-line arguments (currently unused)
     * @throws Exception if job submission or execution fails
     */
    public static void main(String[] args) throws Exception {

        // Set up the streaming execution environment.
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Kafka client configuration for the consumer below.
        Properties properties = new Properties();
        properties.setProperty("bootstrap.servers", BOOTSTRAP_SERVERS);
        properties.setProperty("group.id", "consumer-groupx");
        properties.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        // Start from the latest offset when the group has no committed offset.
        properties.setProperty("auto.offset.reset", "latest");
        // Automatically commit consumed offsets back to Kafka every 2 seconds.
        properties.setProperty("enable.auto.commit", "true");
        properties.setProperty("auto.commit.interval.ms", "2000");

        /*
         * FlinkKafkaConsumer arguments:
         *   1. the topic(s) to read — a single topic, a list of topics, or a
         *      regular expression matching several topics; partitions of all
         *      matched topics are merged into one stream.
         *   2. a DeserializationSchema / KeyedDeserializationSchema — Kafka
         *      stores raw bytes, so records must be deserialized into Java
         *      (or Scala) objects.
         *   3. a Properties object carrying the Kafka client settings.
         *
         * NOTE(review): FlinkKafkaConsumer is deprecated in recent Flink
         * releases in favor of the KafkaSource builder API used via
         * env.fromSource(...); migrate when convenient — confirm against the
         * Flink version on the cluster.
         */
        DataStreamSource<String> couponConsume =
                env.addSource(new FlinkKafkaConsumer<String>(TOPIC, new SimpleStringSchema(), properties));
        couponConsume.print("couponSource");

        // Job name shown in the Flink dashboard (was a stale copy-paste of the
        // SocketWindowWordCount example's name).
        env.execute("Kafka Consume Test");
    }

    /**
     * Word/count pair.
     *
     * <p>Intentionally a mutable POJO with public fields and a public no-arg
     * constructor so Flink's POJO serializer can create and populate instances.
     */
    public static class WordWithCount {

        public String word;
        public long count;

        public WordWithCount() {
        }

        public WordWithCount(String word, long count) {
            this.word = word;
            this.count = count;
        }

        @Override
        public String toString() {
            return word + " : " + count;
        }
    }
}