package com.sinozo.data.app.dim;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.sinozo.data.app.dwd.BaseLogApp;
import com.sinozo.data.process.DimSinkFunction;
import com.sinozo.data.process.TableProcessFunction;
import com.sinozo.data.bean.TableProcess;
import com.sinozo.data.common.ConfigConstant;
import com.sinozo.data.common.DataConfig;
import com.sinozo.data.common.DbConfig;
import com.sinozo.data.utils.MyKafkaUtil;
import com.sinozo.data.utils.PathUtil;
import com.ververica.cdc.connectors.mysql.source.MySqlSource;
import com.ververica.cdc.connectors.mysql.table.StartupOptions;
import com.ververica.cdc.debezium.JsonDebeziumDeserializationSchema;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.BroadcastConnectedStream;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.concurrent.TimeUnit;

/**
 * @author zhaoyb
 * @version 1.0
 * @description Dimension-data sink application: intended to read business change
 *              records from Kafka, join them against a broadcast config table
 *              captured via MySQL CDC, and write matched dimension rows to the
 *              sink. The pipeline body is currently commented out, so running
 *              this class does nothing.
 * @date 2023/6/12 17:40:35
 */
public class DimSinkApp {

    // NOTE(review): the entire pipeline below is commented out, so this app
    // currently does nothing when executed. Can't tell from here whether it was
    // disabled deliberately or is work-in-progress — confirm before restoring.
    public static void main(String[] args) {

//        Logger logger = LoggerFactory.getLogger(BaseLogApp.class);
//
//        //1. Prepare the execution environment
//        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
//        env.setParallelism(DataConfig.KAFKA_PARTITION_NUM);
//
//        //2. Configure checkpointing and the state backend
//        env.enableCheckpointing(DataConfig.FLINK_CHECKPOINT_INTERVAL, CheckpointingMode.EXACTLY_ONCE);
//        env.getCheckpointConfig().setCheckpointTimeout(DataConfig.FLINK_CHECKPOINT_TIMEOUT);
//        env.getCheckpointConfig().setMaxConcurrentCheckpoints(2);
//
//        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3000L);
//        env.getCheckpointConfig().enableExternalizedCheckpoints(
//                CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION
//        );
//        env.setRestartStrategy(RestartStrategies.failureRateRestart(10, Time.of(3L, TimeUnit.DAYS), Time.of(1L, TimeUnit.MINUTES)));
//        env.setStateBackend(new HashMapStateBackend());
//        String hdfsPath = DataConfig.HDFS_SERVER;
//        env.getCheckpointConfig().setCheckpointStorage(PathUtil.getHdfsPath(hdfsPath));
//
//        //3. Read the main business stream from Kafka
//        String topic = ConfigConstant.KAFKA_DB_TOPIC;
//        String groupId = ConfigConstant.KAFKA_DB_GROUP_ID;
//        DataStreamSource<String> kafkaDS = env.addSource(MyKafkaUtil.getFlinkKafkaConsumer(topic, groupId));
//
//        //4. Convert the main stream to JSON objects and perform ETL filtering
//        SingleOutputStreamOperator<JSONObject> filterJsonObjDS = kafkaDS.flatMap(new FlatMapFunction<String, JSONObject>() {
//            @Override
//            public void flatMap(String value, Collector<JSONObject> out) throws Exception {
//                try {
//                    //Parse the record as JSON
//                    JSONObject jsonObject = JSON.parseObject(value);
//                    //Extract the operation-type field
//                    String type = jsonObject.getString("type");
//
//                    //Keep inserts, updates, and bootstrap (initial-load) records
//                    if ("insert".equals(type) || "update".equals(type) || "bootstrap-insert".equals(type)) {
//                        out.collect(jsonObject);
//                    }
//                } catch (Exception e) {
//                    // NOTE(review): if restored, change to logger.error("脏数据", e) —
//                    // with exactly one "{}" placeholder SLF4J consumes the Throwable
//                    // as the placeholder argument and the stack trace is lost.
//                    logger.error("脏数据：{}", e);
//                }
//            }
//        });
//
//        //5. Read the config table via Flink CDC and broadcast it
//        //5.1 Read the config-table changelog with the MySQL CDC source
//        MySqlSource<String> mySqlSource = MySqlSource.<String>builder()
//                .hostname(DbConfig.MYSQL_SERVER)
//                .port(DbConfig.MYSQL_PORT)
//                .databaseList(DbConfig.MYSQL_DATA_BASE) // set captured database
//                .tableList(DbConfig.MYSQL_TABLE) // set captured table
//                .username(DbConfig.MYSQL_USER_NAME)
//                .password(DbConfig.MYSQL_PASSWORD)
//                .deserializer(new JsonDebeziumDeserializationSchema()) // converts SourceRecord to JSON String
//                .startupOptions(StartupOptions.initial())
//                .build();
//
//        //5.2 Wrap the CDC source as a DataStream
//        DataStreamSource<String> mysqlSourceDS = env.fromSource(mySqlSource, WatermarkStrategy.noWatermarks(), "MysqlSource");
//
//        //5.3 Broadcast the config stream
//        MapStateDescriptor<String, TableProcess> mapStateDescriptor = new MapStateDescriptor<>("map-state", String.class, TableProcess.class);
//        BroadcastStream<String> broadcastStream = mysqlSourceDS.broadcast(mapStateDescriptor);
//
//        //6. Connect the main stream with the broadcast config stream
//        BroadcastConnectedStream<JSONObject, String> connectedStream = filterJsonObjDS.connect(broadcastStream);
//
//        SingleOutputStreamOperator<JSONObject> dimDS = connectedStream.process(new TableProcessFunction(mapStateDescriptor));
//
//        dimDS.print(">>>>>>>>>>>>");
//        dimDS.addSink(new DimSinkFunction());
//
//        try {
//            env.execute("DimSinkApp");
//        } catch (Exception e) {
//            // NOTE(review): if restored, prefer logger.error("DimSinkApp ERROR", e)
//            // — the "{}" placeholder here swallows the Throwable the same way as above.
//            logger.error("DimSinkApp ERROR, {}", e);
//        }

    }

}





