package com.zhang.gmall.realtime.app.dwd;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.alibaba.ververica.cdc.connectors.mysql.MySQLSource;
import com.alibaba.ververica.cdc.connectors.mysql.table.StartupOptions;
import com.alibaba.ververica.cdc.debezium.DebeziumSourceFunction;
import com.zhang.gmall.realtime.app.func.DimSinkFunc;
import com.zhang.gmall.realtime.app.func.MyDeserializationSchema;
import com.zhang.gmall.realtime.app.func.TableProcessFunc;
import com.zhang.gmall.realtime.beans.TableProcess;
import com.zhang.gmall.realtime.utils.MyKafkaUtil;
import org.apache.flink.api.common.functions.FilterFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.*;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.streaming.connectors.kafka.KafkaSerializationSchema;
import org.apache.flink.util.OutputTag;
import org.apache.kafka.clients.producer.ProducerRecord;

import javax.annotation.Nullable;
import java.nio.charset.StandardCharsets;

/**
 * @title: 动态分流
 * @author: zhang
 * @date: 2022/3/4 18:35
 * 执行流程：zk、kafka、maxwell、hdfs、hbase、BaseDBApp
 * maxwell通过binlog实时采集业务数据到kafka ods_base_db_m_2022主题
 * FlinkCDC读取配置表数据并转化为广播流并创建广播状态
 * connect连接主流和配置流，形成连接流
 * 对连接流进行分流处理：维度数据保存在侧输出流，实时数据保存在kafka
 *  抽取TableProcessFunc类处理分流业务
 *  processBroadcastElement
 *  1.处理广播流中数据封装TableProcess对象，并放入广播状态
 *  3.如果读到的信息是维度信息，则创建维度表
 *  processElement
 *  2.获取广播状态中的配置信息，根据配置信息进行分流
 *  4.分流之前进行字段过滤
 * BaseDBApp
 */
/**
 * Dynamic splitting job: consumes the raw business-data topic, joins it with a
 * broadcast configuration stream read via Flink CDC, then routes dimension rows
 * to Phoenix/HBase (side output) and fact rows to per-table Kafka topics.
 */
public class BaseDBApp {
    public static void main(String[] args) throws Exception {
        //TODO 1. Set up the streaming execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(4);
        //TODO 2. Checkpoint settings (disabled for local testing; enable in production)
/*        env.enableCheckpointing(5000L, CheckpointingMode.EXACTLY_ONCE);
        //2.2 Checkpoint timeout
        env.getCheckpointConfig().setCheckpointTimeout(60 * 1000L);
        //2.3 Retain externalized checkpoints after job cancellation
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        //2.4 Restart strategy
        //Fixed-delay restart
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 3000L));
        //Failure-rate restart
       // env.setRestartStrategy(RestartStrategies.failureRateRestart(3, Time.milliseconds(3000), Time.days(30)));
        //2.5 Minimum pause between checkpoints
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3000L);
        //2.6 State backend
        env.setStateBackend(new FsStateBackend("hdfs://hadoop102:8020/flink/gmall-ck"));
        //2.7 Hadoop user for HDFS access
        System.setProperty("HADOOP_USER_NAME", "zhang");*/

        //TODO 3. Read business data (Maxwell binlog capture) from Kafka
        String topic = "ods_base_db_m_2022";
        String groupId = "base_db_app";
        DataStreamSource<String> kafkaDS = env.addSource(MyKafkaUtil.getKafkaSource(topic, groupId));
        //TODO 4. Convert each record from JSON string to JSONObject
        SingleOutputStreamOperator<JSONObject> jsonDS = kafkaDS.map(JSON::parseObject);

        //TODO 5. Simple ETL: keep only records with a non-empty table name and
        // a non-empty "data" payload (length > 3 rules out "{}" / "null")
        SingleOutputStreamOperator<JSONObject> filterDS = jsonDS.filter(new FilterFunction<JSONObject>() {
            @Override
            public boolean filter(JSONObject value) throws Exception {
                boolean flag =
                        value.getString("table") != null &&
                        value.getString("table").length() > 0 &&
                        value.getJSONObject("data") != null &&
                        value.getString("data").length() > 3;
                return flag;
            }
        });
        //TODO 6. Read the routing-config table with Flink CDC and turn it into a stream
        DebeziumSourceFunction<String> mysqlSource = MySQLSource.<String>builder()
                .hostname("hadoop103")
                .port(3306)
                .username("root")
                .password("000000")
                .databaseList("gmall_realtime_2022")
                // BUGFIX: was "gmall_realtime_2022.table.process" — tableList entries are
                // "database.table", and the extra dot could never match the config table.
                .tableList("gmall_realtime_2022.table_process")
                .startupOptions(StartupOptions.initial())
                .deserializer(new MyDeserializationSchema())
                .build();

        DataStreamSource<String> tableSource = env.addSource(mysqlSource);

        //TODO 7. Declare the broadcast state descriptor and broadcast the config stream
        MapStateDescriptor<String, TableProcess> mapStateDescriptor = new MapStateDescriptor<>("map-state", Types.STRING, Types.POJO(TableProcess.class));
        BroadcastStream<String> broadcastStream = tableSource.broadcast(mapStateDescriptor);

        //TODO 8. Connect the two streams; connect() is called on the non-broadcast
        // stream with the BroadcastStream passed as the argument
        BroadcastConnectedStream<JSONObject, String> connectedStream = filterDS.connect(broadcastStream);

        //TODO 9. Dynamic split: dimension data -> HBase side output, fact data -> main stream
        OutputTag<JSONObject> dimTag = new OutputTag<JSONObject>("dimTag") {
        };
        SingleOutputStreamOperator<JSONObject> realDS = connectedStream.process(new TableProcessFunc(dimTag, mapStateDescriptor));

        //TODO 10. Dimension data to Phoenix; fact data to Kafka, topic chosen per record
        DataStream<JSONObject> hbaseDS = realDS.getSideOutput(dimTag);
        hbaseDS.addSink(new DimSinkFunc());
        hbaseDS.print("hbase");

        realDS.addSink(MyKafkaUtil.getKafkaSinkBySchema(new KafkaSerializationSchema<JSONObject>() {
            @Override
            public ProducerRecord<byte[], byte[]> serialize(JSONObject element, @Nullable Long timestamp) {
                // "sinkTable" was injected by TableProcessFunc and names the target topic
                String topic = element.getString("sinkTable");
                String data = element.getString("data");
                return new ProducerRecord<byte[], byte[]>(topic, data.getBytes(StandardCharsets.UTF_8));
            }
        }));
        // Debug output
        realDS.print("kafka");
        //TODO 11. Submit the job
        env.execute("BaseDBApp");

    }
}
