package com.ygx.flink.practice.stream;

import com.ygx.flink.practice.model.PvUv;
import com.ygx.flink.practice.model.UserBehavior;
import com.ygx.flink.practice.operators.Accumulator;
import com.ygx.flink.practice.sink.SinkToMysql;
import com.ygx.flink.practice.utils.GsonUtil;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.tuple.Tuple;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.RestOptions;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;

import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Properties;

/**
 * @author YangGuoXiang
 * @version 1.0
 * @date 2022/3/25 13:55
 */
/**
 * Flink streaming job: reads JSON {@code UserBehavior} events from the Kafka topic
 * {@code user_behavior}, accumulates per-hour PV/UV via {@link Accumulator}, keeps the
 * row with the maximum PV per date key over a short processing-time window, and writes
 * the result to MySQL through {@link SinkToMysql}.
 *
 * @author YangGuoXiang
 * @version 1.0
 * @date 2022/3/25 13:55
 */
public class KafkaToMysql {

    /** One hour in milliseconds — used to bucket events into hourly windows by key. */
    private static final long HOUR_MILLIS = 3600000L;

    public static void main(String[] args) throws Exception {
        // For local debugging with the web UI, use instead:
        //   StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(conf)
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        configureCheckpointing(env);

        FlinkKafkaConsumer<String> myConsumer =
                new FlinkKafkaConsumer<>("user_behavior", new SimpleStringSchema(), kafkaProperties());

        // Kafka JSON string -> UserBehavior bean (field names matched case-insensitively
        // via GsonUtil.fromJsonLower).
        DataStream<UserBehavior> stream = env
                .addSource(myConsumer).name("kafkaSource")
                .map(str -> GsonUtil.fromJsonLower(str, UserBehavior.class)).name("jsonToBean");

        // Key by the start-of-hour timestamp (ts truncated to the hour) and let the
        // Accumulator process function maintain running PV/UV per hour.
        DataStream<PvUv> pvUv = stream
                .keyBy(x -> x.getTs().getTime() - x.getTs().getTime() % HOUR_MILLIS)
                .process(new Accumulator()).name("accumulator");

        pvUv
                .keyBy(x -> x.getDt())
                .window(TumblingProcessingTimeWindows.of(Time.seconds(1)))
                // max() returns an element whose specified field is the maximum, but the
                // other fields may come from a different record; maxBy() returns the
                // whole record that actually holds the maximum field value.
                .maxBy("pv").name("openWindow")
                .addSink(new SinkToMysql()).name("sinkMysql");

        env.execute("StreamKafkaToMysql");
    }

    /**
     * Applies the job's checkpointing policy: exactly-once every second, retained on
     * cancellation, stored on HDFS.
     *
     * @param env the execution environment to configure (mutated in place)
     */
    private static void configureCheckpointing(StreamExecutionEnvironment env) {
        // Start a checkpoint every 1000 ms.
        env.enableCheckpointing(1000);

        // Exactly-once is the default; stated explicitly for clarity.
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);

        // Make sure 500 ms of progress happen between checkpoints.
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(500);

        // Checkpoints have to complete within one minute, or are discarded.
        env.getCheckpointConfig().setCheckpointTimeout(60000);

        // Only two consecutive checkpoint failures are tolerated.
        env.getCheckpointConfig().setTolerableCheckpointFailureNumber(2);

        // Allow only one checkpoint to be in progress at the same time.
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);

        // Keep externalized checkpoints after job cancellation so the job can be
        // restored manually.
        env.getCheckpointConfig().setExternalizedCheckpointCleanup(
                CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        // Unaligned checkpoints reduce checkpoint latency under backpressure.
        env.getCheckpointConfig().enableUnalignedCheckpoints();

        // For local runs, a file:// path (e.g. file:///E:/temp/flink-checkpoints/...)
        // can be substituted here.
        env.getCheckpointConfig().setCheckpointStorage(
                "hdfs://cdh01:8020/flink/checkpoint/StreamKafkaToMysql");
    }

    /**
     * Builds the Kafka consumer configuration for the {@code pvuv_stream} group.
     *
     * @return consumer properties (brokers, group id, String deserializers)
     */
    private static Properties kafkaProperties() {
        Properties properties = new Properties();
        properties.setProperty("bootstrap.servers", "cdh04:9092,cdh05:9092,cdh06:9092");
        properties.setProperty("group.id", "pvuv_stream");
        properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        return properties;
    }
}