package com.test.flink.monitor;

import com.alibaba.fastjson.JSON;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.io.RowCsvInputFormat;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.core.fs.Path;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.windowing.ProcessWindowFunction;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.api.windowing.windows.GlobalWindow;
import org.apache.flink.streaming.api.windowing.windows.TimeWindow;
import org.apache.flink.util.Collector;

import java.util.ArrayList;
import java.util.List;

/**
 * Flink streaming demo: parses monitor-data JSON records from an in-memory
 * collection, keys them by {@code userId}, and aggregates each user's readings
 * with a count window of 3 elements.
 *
 * <p>Note: with {@code countWindow(3)}, a key only emits once it has accumulated
 * 3 elements — in this sample data, {@code user2} has a single record, so its
 * window never fires and it produces no output. That is expected demo behavior.
 */
public class MonitorJobDemo {

    public static void main(String[] args) throws Exception {

        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Parallelism can be pinned for deterministic local runs, e.g.:
        // env.setParallelism(3);

        // BATCH runtime mode is not applicable here: Kafka-style unbounded
        // sources require STREAMING mode, and this demo stays in the default.
        // env.setRuntimeMode(RuntimeExecutionMode.BATCH);

        // 1. Prepare sample input: JSON strings, one record per user reading.
        List<String> list = new ArrayList<>();
        list.add("{'data':[0.023,0.003,0.043],'userId':'user1'}");
        list.add("{'data':[0.021,0.002,0.027],'userId':'user2'}");
        list.add("{'data':[0.031,0.001,0.042],'userId':'user1'}");
        list.add("{'data':[0.031,0.001,0.044],'userId':'user1'}");

        DataStreamSource<String> stream = env.fromCollection(list);

        // 2. Deserialize each JSON string into a MonitorData POJO.
        SingleOutputStreamOperator<MonitorData> flatMap = stream.flatMap(new FlatMapFunction<String, MonitorData>() {
            @Override
            public void flatMap(String s, Collector<MonitorData> collector) throws Exception {
                MonitorData data = JSON.parseObject(s, MonitorData.class);
                collector.collect(data);
            }
        });

        // 3. Partition the stream by userId so each user's data is processed together.
        KeyedStream<MonitorData, String> keyedStream = flatMap.keyBy(new KeySelector<MonitorData, String>() {
            @Override
            public String getKey(MonitorData data) throws Exception {
                return data.getUserId();
            }
        });

        // 4. Full window aggregation: countWindow(3) buffers elements per key and
        //    invokes process() only once 3 elements have arrived for that key.
        //    (Time-based windows such as TimeWindow are the alternative trigger.)
        SingleOutputStreamOperator<MonitorData> process = keyedStream.countWindow(3)
                .process(new ProcessWindowFunction<MonitorData, MonitorData, String, GlobalWindow>() {
            @Override
            public void process(String key, Context context, Iterable<MonitorData> input, Collector<MonitorData> out) throws Exception {
                // key: the current userId; context: window metadata;
                // input: all buffered elements for this key; out: result collector.
                MonitorData monitorData = new MonitorData();
                monitorData.setUserId(key);
                // Merge all readings for this user into one aggregate record.
                for (MonitorData in : input) {
                    monitorData.addData(in.getData());
                }
                // Domain calculations (e.g. heart rate) would go here.

                // Emit the aggregated record downstream.
                out.collect(monitorData);
            }
        });
        process.print();

        // Execute the program, beginning computation. The name describes the
        // job accurately (the source is an in-memory collection, not Kafka).
        env.execute("monitor data count-window demo");
    }

}
