package org.databandtech.flinkstreaming;

import java.util.Properties;

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer011;

/**
 * Flink streaming job that consumes string messages from a Kafka topic and
 * prints each record to stdout.
 *
 * <p>Deployment notes (Flink 1.13.2):
 * <pre>
 * cd /usr/app/flink-1.13.2
 * # start the cluster
 * ./bin/start-cluster.sh
 * bin/flink run --class org.databandtech.flinkstreaming.KafkaConsumerApp /usr/app/flink-1.13.2/examples/rt.jar
 * </pre>
 */
public class KafkaConsumerApp {

	public static void main(String[] args) {

		// Create the Flink execution environment.
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		// Kafka connection parameters.
		Properties properties = new Properties();
		properties.setProperty("bootstrap.servers", "localhost:9092");
		properties.setProperty("group.id", "flink-group");
		String inputTopic = "Hello-Kafka";

		// Source: consume UTF-8 string records from the input topic.
		FlinkKafkaConsumer<String> consumer =
				new FlinkKafkaConsumer<>(inputTopic, new SimpleStringSchema(), properties);

		// Checkpointing (currently disabled):
		// env.enableCheckpointing(5000); // checkpoint every 5000 msecs

		// Time characteristic (Flink default in use):
		// env.setStreamTimeCharacteristic(TimeCharacteristic.ProcessingTime); // system time of the machine running the operator
		// env.setStreamTimeCharacteristic(TimeCharacteristic.IngestionTime);  // the time an event enters Flink
		// env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);      // the time an event occurred on its producing device

		// Controlling the starting consumer offset:
		// consumer.setStartFromEarliest();     // start from the earliest record possible
		// consumer.setStartFromLatest();       // start from the latest record
		// consumer.setStartFromTimestamp(...); // start from specified epoch timestamp (milliseconds)
		// consumer.setStartFromGroupOffsets(); // the default behaviour

		// Controlling per-partition starting offsets:
		// Map<KafkaTopicPartition, Long> specificStartOffsets = new HashMap<>();
		// specificStartOffsets.put(new KafkaTopicPartition("myTopic", 0), 23L);
		// specificStartOffsets.put(new KafkaTopicPartition("myTopic", 1), 31L);
		// specificStartOffsets.put(new KafkaTopicPartition("myTopic", 2), 43L);
		// myConsumer.setStartFromSpecificOffsets(specificStartOffsets);

		DataStream<String> stream = env.addSource(consumer);

		stream.print(); // print each consumed record to stdout

		try {
			env.execute("kafkaTest");
		} catch (Exception e) {
			// Fail fast and preserve the original cause instead of swallowing
			// the failure with printStackTrace().
			throw new RuntimeException("Flink job 'kafkaTest' failed", e);
		}
	}
}
