package com.nucarf.bi.digtalize;

import com.alibaba.fastjson.JSONObject;
import com.nucarf.bi.digtalize.bean.BinLogRecord;
import com.nucarf.bi.digtalize.bean.SinkKafkaBean;
import com.nucarf.bi.digtalize.config.KafkaConfig;
import com.nucarf.bi.digtalize.function.*;
import com.nucarf.bi.digtalize.util.PropertyUtil;
import com.ververica.cdc.connectors.mysql.MySqlSource;
import com.ververica.cdc.connectors.mysql.table.StartupOptions;
import com.ververica.cdc.debezium.DebeziumSourceFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.runtime.state.storage.FileSystemCheckpointStorage;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;

public class DigitalizePushCrm {

    /**
     * Entry point: builds and runs the Flink job that streams MySQL binlog
     * changes (captured via Flink CDC / Debezium) through per-customer
     * processing and into Kafka for the CRM push pipeline.
     *
     * <p>Pipeline: MySQL CDC source → {@code BinLogRecord} → {@code SinkKafkaBean}
     * → keyBy customer code → {@code ProcessOrderFunction} → Kafka sink.
     *
     * @param args command-line arguments (unused)
     * @throws Exception if the job fails to build or execute; propagated so the
     *                   launcher sees a non-zero exit instead of a swallowed
     *                   stack trace (previously {@code printStackTrace()} hid
     *                   failures and the process exited successfully)
     */
    public static void main(String[] args) throws Exception {
        // 1. Create the streaming execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // 2. Flink CDC keeps the binlog position in checkpointed state; restarting
        //    from a checkpoint or savepoint resumes reading where the job left off.
        // 2.1 Enable checkpointing at the configured interval (milliseconds).
        env.enableCheckpointing(Long.parseLong(PropertyUtil.getPropertyValue("flink.checkpoint.roll")));
        // 2.2 Exactly-once checkpoint semantics.
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        // 2.3 Retain the last checkpoint when the job is cancelled so the job can
        //     be restarted from it later.
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        // 2.4 Fixed-delay restart strategy; attempt count and delay come from config.
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(
                Integer.parseInt(PropertyUtil.getPropertyValue("flink.deploy.retry")),
                Long.parseLong(PropertyUtil.getPropertyValue("flink.deploy.between.attempts"))));
        // 2.5 Store checkpoints on HDFS.
        env.getCheckpointConfig().setCheckpointStorage(new FileSystemCheckpointStorage(PropertyUtil.getPropertyValue("hdfs.path")));
        // 2.6 User name and conf dir used for HDFS access.
        System.setProperty("HADOOP_USER_NAME", PropertyUtil.getPropertyValue("flink.hadoop.user.name"));
        System.setProperty("HADOOP_CONF_DIR", PropertyUtil.getPropertyValue("flink.hadoop.conf.dir"));
        // 2.7 Checkpoint timeout (milliseconds).
        env.getCheckpointConfig().setCheckpointTimeout(Long.parseLong(PropertyUtil.getPropertyValue("flink.checkpoint.timeout")));

        // 3. Build the Flink MySQL CDC source.
        DebeziumSourceFunction<JSONObject> mysqlSource = MySqlSource.<JSONObject>builder()
                .hostname(PropertyUtil.getPropertyValue("mysql.host"))
                // NOTE(review): port is hard-coded while every other connection
                // setting is configurable — consider a "mysql.port" property.
                .port(3306)
                .username(PropertyUtil.getPropertyValue("mysql.user"))
                .password(PropertyUtil.getPropertyValue("mysql.password"))
                .databaseList(PropertyUtil.getPropertyValue("mysql.database"))
                // Optional; without it every table in the database list is read.
                // Entries must use the "db.table" form.
                .tableList(PropertyUtil.getPropertyValue("mysql.database.table"))
                // initial(): snapshot existing rows first, then tail the binlog.
                .startupOptions(StartupOptions.initial())
                .deserializer(new CdcJsonDeserializationSchema())
                .build();

        // 4. Read change events from MySQL through the CDC source.
        DataStreamSource<JSONObject> mysqlDS = env.addSource(mysqlSource);

        // 5. Map raw JSON to the binlog entity, then to the Kafka sink bean, and
        //    process per customer code.
        SingleOutputStreamOperator<BinLogRecord> binLogRecordStream = mysqlDS.flatMap(new FlatMap2BinLogFunction());
        SingleOutputStreamOperator<SinkKafkaBean> sinkStream = binLogRecordStream.flatMap(new Map2SinkBeanFunction());
        SingleOutputStreamOperator<SinkKafkaBean> processStream = sinkStream
                .keyBy(SinkKafkaBean::getCus_code)
                .process(new ProcessOrderFunction());

        // NOTE(review): default topic is "" — presumably KafkaSinkSchema assigns
        // the real topic per record; verify, otherwise records have no destination.
        FlinkKafkaProducer<SinkKafkaBean> producer = new FlinkKafkaProducer<>(
                "",
                new KafkaSinkSchema(),
                KafkaConfig.getKafkaConfig(),
                FlinkKafkaProducer.Semantic.NONE);
        processStream.addSink(producer);

        // 6. Launch the job.
        env.execute("digitalize-push-crm");
    }
}
