package com.wudl.realtime.job;

import com.wudl.realtime.avro.AvroDeserializationSchema;
import com.wudl.realtime.avro.SseAvro;
import com.wudl.realtime.avro.SzseAvro;
import com.wudl.realtime.config.QuotConfig;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer011;

import java.util.Properties;

/**
 * Per-stock (individual stock) quotation stream job.
 *
 * @author wudl
 * @version 1.0
 * @since 2022-01-05 23:34
 */

public class StockStream {

    /**
     * Entry point. Builds and runs a Flink streaming job that consumes
     * Avro-encoded quote messages from two Kafka topics — SSE (Shanghai)
     * and SZSE (Shenzhen) — and currently prints the Shenzhen stream.
     *
     * <p>Topic names, brokers and consumer group are read from
     * {@link QuotConfig#config}.
     *
     * @param args command-line arguments (unused)
     * @throws Exception if the job graph cannot be built or execution fails
     */
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        // Restart strategy: on failure, retry the job up to 3 times with a
        // 5-second delay between attempts.
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, Time.seconds(5)));

        // Kafka consumer properties shared by both sources.
        Properties properties = new Properties();
        properties.setProperty("bootstrap.servers", QuotConfig.config.getProperty("bootstrap.servers"));
        properties.setProperty("group.id", QuotConfig.config.getProperty("group.id"));

        // SSE (Shanghai) source.
        // NOTE(review): this stream is added to the topology but never
        // consumed by any sink/operator here — presumably downstream logic
        // is still to be written; confirm before removing.
        FlinkKafkaConsumer011<SseAvro> sseSource = buildConsumer("sse.topic", properties);
        DataStreamSource<SseAvro> sseValue = env.addSource(sseSource);

        // SZSE (Shenzhen) source, printed for inspection.
        FlinkKafkaConsumer011<SzseAvro> szseSource = buildConsumer("szse.topic", properties);
        DataStreamSource<SzseAvro> szseValue = env.addSource(szseSource);
        szseValue.print("深圳---->");

        // Trigger job execution.
        env.execute("stock stream");
    }

    /**
     * Builds a Kafka consumer for the topic named by {@code topicKey} in
     * {@link QuotConfig#config}, using an Avro deserialization schema bound
     * to that topic, starting from the latest offsets.
     *
     * @param topicKey   property key whose value is the Kafka topic name
     * @param properties shared Kafka consumer properties
     * @param <T>        Avro record type produced by the consumer
     * @return a configured {@link FlinkKafkaConsumer011}
     */
    private static <T> FlinkKafkaConsumer011<T> buildConsumer(String topicKey, Properties properties) {
        String topic = QuotConfig.config.getProperty(topicKey);
        FlinkKafkaConsumer011<T> consumer =
                new FlinkKafkaConsumer011<>(topic, new AvroDeserializationSchema<T>(topic), properties);
        // Ignore committed offsets; always begin from the newest records.
        consumer.setStartFromLatest();
        return consumer;
    }
}
