package flinkdemo.kafak;


import flinkdemo.kafak.entity.FindMsg;
import flinkdemo.kafak.entity.VideoMsg;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;

import java.util.Properties;

/**
 * Known issues / operational notes:
 * 1. Flink controls the number of Kafka consumers via env.setParallelism(1).
 * 2. Even with the same group.id, starting two StreamingJobs still consumes messages twice: https://www.it1352.com/1932532.html
 * 3. Kafka's advertised host must be set to an IP address, otherwise no messages are received.
 * 4. Put the ArcSoft face-recognition jar under Flink's lib directory; do not bundle it into the job jar,
 *    or the native DLL gets loaded twice and fails.
 * 5. Likewise for the config files: in a distributed setup, copy them to every server.
 */
public class StreamingRemoteJob {

	// Minimum face-match confidence (percent) handed to the recognition flatMap.
	public static int passRate = 80;
	// NOTE(review): hard-coded ArcSoft SDK credentials — move to external config and rotate these keys.
	private static final String appId = "BGFUM4yT5M9PRGfd9VgUXMzGbK1jZJer4YJzG77HovXR";
	private static final String sdkKey = "AGURJP2c7PRahrMsS9fPFqW6dFGrgXpdQpELRFEyhtoS";
	// Directory containing the ArcSoft face-recognition native DLLs on the worker machines.
	private static final String dllPath = "E:\\code\\ideawork\\hadoopone\\dll";

	// Kafka topic the job reads video/image messages from.
	public static String consumerTopic = "add-video";
	// Kafka topic the job writes recognition results to.
	public static String prodTopic = "find-thing";

	/**
	 * Builds and submits the streaming pipeline to a remote Flink cluster:
	 * Kafka "add-video" -&gt; image decoding -&gt; face recognition -&gt; Kafka "find-thing".
	 *
	 * @param args unused
	 * @throws Exception if job submission or execution fails
	 */
	public static void main(String[] args) throws Exception {

		String host = "master";
		int port = 8081;
		// Job jar shipped to the remote cluster; must contain the user functions used below.
		String jarFiles = "E:\\code\\ideawork\\hadoopone\\target\\hadoopone-0.0.1-SNAPSHOT.jar";
		StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment(host, port, jarFiles);

		// Single parallel instance; this also limits the number of Kafka consumer instances.
		env.setParallelism(1);
		// Checkpoint every 5s so the Kafka consumer's fault-tolerance (offset restore) is active.
		env.enableCheckpointing(5000);

		// NOTE(review): EventTime is enabled but no timestamp/watermark assigner is attached to the
		// source, so event-time windows would never fire; harmless here since no windows are used.
		env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

		Properties consumerProps = new Properties();
		consumerProps.setProperty("bootstrap.servers", "master:9092");
		consumerProps.setProperty("group.id", "flink-group");
		consumerProps.setProperty("enable.auto.commit", "true");

		// Source: Kafka consumer deserializing each record into a VideoMsg.
		FlinkKafkaConsumer<VideoMsg> consumer =
				new FlinkKafkaConsumer<>(consumerTopic, new ConsumerDeserializationSchema(VideoMsg.class), consumerProps);
		// Resume from the committed group offsets (the default start position).
		// Alternatives: setStartFromEarliest(), setStartFromLatest(), setStartFromTimestamp(ms),
		// or setStartFromSpecificOffsets(Map<KafkaTopicPartition, Long>) for per-partition starts.
		consumer.setStartFromGroupOffsets();

		Properties producerProps = new Properties();
		producerProps.setProperty("bootstrap.servers", "master:9092");
		// 5-minute transaction timeout; must not exceed the broker's transaction.max.timeout.ms.
		producerProps.setProperty("transaction.timeout.ms", String.valueOf(1000 * 60 * 5));
		// Sink: Semantic.NONE — no delivery guarantee is enforced by the producer.
		FlinkKafkaProducer<FindMsg> producer = new FlinkKafkaProducer<>(
				prodTopic, new ProductSerializationSchema(prodTopic), producerProps, FlinkKafkaProducer.Semantic.NONE);

		env.addSource(consumer)
				// Decode each incoming image into a BGR byte array.
				.flatMap(new ImgProcessingFlatMap())
				// Run ArcSoft face recognition and emit match results.
				.flatMap(new ImgFlatMap(dllPath, appId, sdkKey, passRate))
				.addSink(producer);

		env.execute("Flink-Kafka demo");
	}

}

