package com.atbeijing.gmall.realtime.app.dwd;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.alibaba.ververica.cdc.connectors.mysql.MySQLSource;
import com.alibaba.ververica.cdc.connectors.mysql.table.StartupOptions;
import com.alibaba.ververica.cdc.debezium.DebeziumSourceFunction;
import com.atbeijing.gmall.realtime.app.func.DimSink;
import com.atbeijing.gmall.realtime.app.func.MyDeserializationSchemaFunction;
import com.atbeijing.gmall.realtime.app.func.MyProcessFunction;
import com.atbeijing.gmall.realtime.bean.TableProcess;
import com.atbeijing.gmall.realtime.utils.MyKafkaUtil;
import org.apache.flink.api.common.functions.FilterFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.*;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.KafkaSerializationSchema;
import org.apache.flink.util.OutputTag;
import org.apache.kafka.clients.producer.ProducerRecord;

import javax.annotation.Nullable;

/**
 * 从kafka读取ods层用户行业务数据connect配置表数据 进行分流
 */
public class BaseDBApp {
    public static void main(String[] args) throws Exception {
        //TODO 1. Prepare the execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Parallelism matches the Kafka topic's partition count (4) — see step 3
        env.setParallelism(4);

//        //TODO 2. Checkpoint configuration (disabled for local development)
//        //Exactly-once guarantee (the default); start a checkpoint every 10000 ms
//        env.enableCheckpointing(10*1000L, CheckpointingMode.EXACTLY_ONCE);
//        //A checkpoint must complete within 2 minutes, otherwise it is discarded
//        env.getCheckpointConfig().setCheckpointTimeout(120*1000L);
//        //Retain externalized checkpoints when the job is cancelled
//        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
//        //Restart strategy: at most 3 restarts, 3 seconds apart
//        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 3000L));
//        //State backend (checkpoint storage): memory | filesystem | RocksDB
//        env.setStateBackend(new FsStateBackend("hdfs://hadoop202:8020/gmall/ck"));
//        //HDFS user for checkpoint writes
//        System.setProperty("HADOOP_USER_NAME","wjg");

        //TODO 3. Read business data from Kafka (ODS layer)
        //Consumer topic and group
        String topic = "ods_base_db_m";
        String groupId = "base_db_app_group";
        //4 parallel subtasks read the topic's 4 partitions
        DataStreamSource<String> kafkaDS = env.addSource(MyKafkaUtil.getKafkaSource(topic, groupId));
        //Parse each record into a JSON object
        SingleOutputStreamOperator<JSONObject> jsonObjDS = kafkaDS.map(JSON::parseObject);
        //ETL: drop records without a table name or with an empty/near-empty "data"
        //payload ("{}" is 2 chars, so length > 5 filters out no-op changes)
        SingleOutputStreamOperator<JSONObject> filteredDS = jsonObjDS.filter(
                (FilterFunction<JSONObject>) value ->
                        value.getString("table") != null
                                && value.getString("table").length() > 0
                                && value.getJSONObject("data") != null
                                && value.getString("data").length() > 5
        );

        //TODO 4. Read the routing-config table via Flink CDC
        //Build the MySQL CDC source for the table_process config table
        DebeziumSourceFunction<String> sourceFunction =
                MySQLSource.<String>builder()
                        .hostname("hadoop202")
                        .port(3306)
                        .username("root")
                        .password("123456")
                        .databaseList("gmall1116_realtime")
                        .tableList("gmall1116_realtime.table_process")
                        //initial(): take a full snapshot first, then stream binlog changes
                        .startupOptions(StartupOptions.initial())
                        .deserializer(new MyDeserializationSchemaFunction())//custom deserialization
                        .build();

        //Config stream from the database
        DataStreamSource<String> sqlDS = env.addSource(sourceFunction);
        //Broadcast-state descriptor: key "table:operationType" -> routing config
        MapStateDescriptor<String, TableProcess> mapStateDescriptor =
                new MapStateDescriptor<>("table-process", String.class, TableProcess.class);
        //Turn the config stream into a broadcast stream
        BroadcastStream<String> broadcastDS = sqlDS.broadcast(mapStateDescriptor);

        //TODO 5. Connect the main stream with the broadcast config stream and split it
        //Main stream connects the broadcast stream (not the other way around)
        BroadcastConnectedStream<JSONObject, String> connectedDS = filteredDS.connect(broadcastDS);

        //Dimension-table records go to a side output
        OutputTag<JSONObject> dimTag = new OutputTag<JSONObject>("dimTag"){};
        //Both streams share state, so the same descriptor is passed to the process function
        SingleOutputStreamOperator<JSONObject> realDS =
                connectedDS.process(new MyProcessFunction(dimTag, mapStateDescriptor));

        //Dimension-table side output
        DataStream<JSONObject> dimDS = realDS.getSideOutput(dimTag);

        //TODO 6. Sink dimension records to HBase
        dimDS.addSink(new DimSink());

        //TODO 7. Write fact records back to Kafka (DWD layer)
        //Records from different tables are routed to different topics via "sink_table"
        realDS.addSink(
                MyKafkaUtil.getKafkaSinkBySchema(new KafkaSerializationSchema<JSONObject>() {
                    @Override
                    public ProducerRecord<byte[], byte[]> serialize(JSONObject jsonObj, @Nullable Long timestamp) {
                        //Target topic comes from the routing config attached upstream
                        String topic = jsonObj.getString("sink_table");
                        //Payload: only the "data" portion of the change record
                        JSONObject dataJsonObj = jsonObj.getJSONObject("data");
                        return new ProducerRecord<byte[], byte[]>(topic, dataJsonObj.toJSONString().getBytes());
                    }
                })
        );

        env.execute("BaseDBApp");
    }
}
