package com.test.flink.monitor;

import com.alibaba.fastjson.JSON;
import org.apache.flink.api.common.RuntimeExecutionMode;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.windowing.AllWindowFunction;
import org.apache.flink.streaming.api.functions.windowing.ProcessWindowFunction;
import org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.api.windowing.windows.TimeWindow;
import org.apache.flink.streaming.util.keys.KeySelectorUtil;
import org.apache.flink.util.Collector;

import java.util.List;

/**
 * Flink streaming job that consumes JSON-encoded monitoring events from Kafka,
 * keys them by user id, and merges each user's samples over 20-second tumbling
 * processing-time windows.
 *
 * <p>Expected input: one JSON object per Kafka record, deserializable into
 * {@code MonitorData} (carries a user id plus a {@code List<Double>} of samples)
 * — TODO confirm the exact schema against the producer.
 */
public class MonitorJob {

    public static void main(String[] args) throws Exception {

        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Set an explicit parallelism here if the cluster default is unsuitable.
//        env.setParallelism(3);

        // BATCH runtime mode does not apply: the Kafka topic is an unbounded
        // stream, so the job must run in the (default) STREAMING mode.
        // env.setRuntimeMode(RuntimeExecutionMode.BATCH);

        // 1. Build the Kafka source (new KafkaSource connector, Flink 1.12+).
        KafkaSource<String> source = KafkaSource.<String>builder()
                .setBootstrapServers("localhost:9092")
                .setTopics("test")
                .setGroupId("flink")
                .setValueOnlyDeserializer(new SimpleStringSchema())
                .build();

        DataStreamSource<String> stream = env.fromSource(
                source,
                WatermarkStrategy.noWatermarks(),
                "kafka source"
        );

        // 2. Parse each record into (userId, samples) pairs.
        SingleOutputStreamOperator<Tuple2<String, List<Double>>> flatMap = stream.flatMap(new FlatMapFunction<String, Tuple2<String, List<Double>>>() {
            @Override
            public void flatMap(String s, Collector<Tuple2<String, List<Double>>> collector) throws Exception {
                // fastjson returns null for empty/blank input; skip such records
                // instead of emitting a null-keyed tuple (which would fail keyBy).
                MonitorData data = JSON.parseObject(s, MonitorData.class);
                if (data != null) {
                    collector.collect(Tuple2.of(data.getUserId(), data.getData()));
                }
            }
        });

        // 3. Partition the stream by user id.
        KeyedStream<Tuple2<String, List<Double>>, String> keyedStream = flatMap.keyBy(new KeySelector<Tuple2<String, List<Double>>, String>() {
            @Override
            public String getKey(Tuple2<String, List<Double>> tuple) throws Exception {
                return tuple.f0;
            }
        });

        // 4. Aggregate per user over 20-second tumbling processing-time windows.
        //    timeWindow(...) is deprecated since Flink 1.12 and removed in 1.13;
        //    use the explicit window assigner instead.
        keyedStream
                .window(TumblingProcessingTimeWindows.of(Time.seconds(20)))
                .process(new ProcessWindowFunction<Tuple2<String, List<Double>>, MonitorData, String, TimeWindow>() {
                    @Override
                    public void process(String key, Context context, Iterable<Tuple2<String, List<Double>>> input, Collector<MonitorData> out) throws Exception {
                        // Fold every sample list seen for this user in the window
                        // into a single MonitorData result.
                        MonitorData monitorData = new MonitorData();
                        monitorData.setUserId(key);
                        for (Tuple2<String, List<Double>> in : input) {
                            monitorData.addData(in.f1);
                        }
                        out.collect(monitorData);
                    }
                })
                // The aggregated result was previously discarded (no sink was
                // attached); print it so the window output is observable.
                // Replace with a production sink (Kafka, JDBC, ...) as needed.
                .print();

        // Execute the program, beginning computation. Blocks until the job ends.
        env.execute("flink and kafka connection");
    }
}
