package com.atguigu.app.dim;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONException;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.bean.TableProcess;
import com.atguigu.function.DimSinkFunc;
import com.atguigu.function.MyBroadcastFunction;
import com.atguigu.util.KafkaUtil;
import com.ververica.cdc.connectors.mysql.source.MySqlSource;
import com.ververica.cdc.connectors.mysql.table.StartupOptions;
import com.ververica.cdc.debezium.JsonDebeziumDeserializationSchema;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.FilterFunction;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.streaming.api.datastream.BroadcastConnectedStream;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;

/**
 * DIM-layer sink application.
 *
 * <p>Pipeline: reads change-log records for all business tables from the Kafka
 * ODS topic {@code topic_db}, filters out non-JSON and bootstrap marker records,
 * connects the data stream with a broadcast stream of the MySQL configuration
 * table ({@code gmall_config.table_process}, read via Flink CDC), extracts the
 * dimension-table records selected by that configuration, and writes them out
 * through {@link DimSinkFunc} (HBase via Phoenix).
 *
 * @author yhm
 * @create 2022-11-16 14:24
 */
public class DimSinkApp {
    public static void main(String[] args) throws Exception {
        // 1. Create the stream execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // 2. Configure the environment.
        // Parallelism 1 keeps this demo job simple; raise it for production loads.
        env.setParallelism(1);

        /*
        // Production checkpoint / restart configuration (disabled for local runs):
        env.enableCheckpointing(3000L, CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setCheckpointTimeout(60 * 1000L);
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3000L);
        env.getCheckpointConfig().enableExternalizedCheckpoints(
                CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION
        );
        env.setRestartStrategy(RestartStrategies.failureRateRestart(
                10, Time.of(1L, TimeUnit.DAYS), Time.of(3L, TimeUnit.MINUTES)
        ));
        env.setStateBackend(new HashMapStateBackend());
        env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop102:8020/gmall/ck");

        // Set the process user name so the job has permission to write checkpoints to HDFS.
        System.setProperty("HADOOP_USER_NAME", "atguigu");
         */


        // TODO 1 Read the ODS-layer topic "topic_db" from Kafka.
        String topicName = "topic_db";
        String groupId = "DimSinkApp";
        DataStreamSource<String> kafkaDbStream = env.addSource(KafkaUtil.getFlinkKafkaConsumer(topicName, groupId));

//        kafkaDbStream.print("kafka=> ");

        // TODO 2 Filter out dirty data:
        // records that are not valid JSON, and records whose type is
        // "bootstrap-start" or "bootstrap-complete" (historical-sync markers).
        SingleOutputStreamOperator<String> filterStream = kafkaDbStream.filter(new FilterFunction<String>() {
            @Override
            public boolean filter(String value) throws Exception {
                try {
                    JSONObject jsonObject = JSON.parseObject(value);
                    // parseObject returns null for a null/empty payload; the
                    // NPE that getString would then throw is NOT a JSONException,
                    // so treat null explicitly as dirty data instead of crashing the job.
                    if (jsonObject == null) {
                        return false;
                    }
                    String type = jsonObject.getString("type");
                    if ("bootstrap-start".equals(type) || "bootstrap-complete".equals(type)) {
                        return false;
                    }
                } catch (JSONException e) {
                    // Not valid JSON -> drop the record.
                    return false;
                }
                return true;
            }
        });

        filterStream.print("filter >>");

        // TODO 3 Monitor the configuration table with Flink CDC to read dimension-table metadata.
        // The configuration table lists the dimension tables and the full
        // Phoenix CREATE TABLE statements for them.
        MySqlSource<String> mySqlSource = MySqlSource.<String>builder()
                .hostname("hadoop102")
                .port(3306)
                .username("root")
                .password("123456")
                .deserializer(new JsonDebeziumDeserializationSchema())
                .databaseList("gmall_config")
                // Monitored tables must be qualified as database.table.
                .tableList("gmall_config.table_process")
                .startupOptions(StartupOptions.initial())  // snapshot first, then stream changes
                .build();

        DataStreamSource<String> mysqlSource = env.fromSource(mySqlSource, WatermarkStrategy.noWatermarks(), "mysqlSource").setParallelism(1);
//        mysqlSource.print("mysql>>");

        // TODO 4 Turn the configuration stream into a broadcast stream.
        MapStateDescriptor<String, TableProcess> tableProcessStateDes = new MapStateDescriptor<>("table_process", String.class, TableProcess.class);

        BroadcastStream<String> broadcastStream = mysqlSource.broadcast(tableProcessStateDes);

        // TODO 5 Connect the two streams.
        // union   -> merges streams of the same type into one
        // connect -> keeps the two streams separate but lets each side
        //            use data arriving on the other (broadcast state here)
        BroadcastConnectedStream<String, String> connectedStream = filterStream.connect(broadcastStream);

        // TODO 6 Process the connected stream to extract dimension-table records.
        SingleOutputStreamOperator<JSONObject> process = connectedStream.process(new MyBroadcastFunction(tableProcessStateDes));

        process.print("process>>>>");

        // TODO 7 Write the dimension records out to HBase.
        process.addSink(new DimSinkFunc());

        // TODO 8 Execute the job.
        env.execute(groupId);
    }
}
