package com.atguigu.flink.edu.app.dim;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONException;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.flink.edu.app.fun.DimSinkFunction;
import com.atguigu.flink.edu.app.fun.TableProcessFunction;
import com.atguigu.flink.edu.beans.TableProcess;
import com.atguigu.flink.edu.utils.MyKafkaUtil;
import com.ververica.cdc.connectors.mysql.source.MySqlSource;
import com.ververica.cdc.connectors.mysql.table.StartupOptions;
import com.ververica.cdc.debezium.JsonDebeziumDeserializationSchema;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.BroadcastConnectedStream;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.util.Collector;

import java.util.Properties;

/**
 * DIM-layer application: consumes business change-log records from Kafka
 * ({@code topic_db}), broadcasts the dimension-table configuration read from
 * MySQL via Flink CDC, joins the two streams, and writes the resulting
 * dimension records to HBase through Phoenix.
 *
 * <p>NOTE(review): the class name should be UpperCamelCase ({@code DimApp});
 * it is kept as-is because it is the job entry point and may be referenced by
 * name in submit scripts.
 */
public class dimApp {
    public static void main(String[] args) throws Exception {
        //TODO 1: basic environment setup
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(4);
        env.disableOperatorChaining();
        //TODO 2: checkpoint settings — intentionally disabled (presumably for local
        //development); re-enable for production so the job is fault tolerant
        ////enable checkpointing; EXACTLY_ONCE implies aligned barriers
        //env.enableCheckpointing(5000, CheckpointingMode.EXACTLY_ONCE);
        ////checkpoint timeout
        //env.getCheckpointConfig().setCheckpointTimeout(60000L);
        ////retain externalized checkpoints after the job is cancelled
        //env.getCheckpointConfig().setExternalizedCheckpointCleanup(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        ////minimum pause between two consecutive checkpoints
        //env.getCheckpointConfig().setMinPauseBetweenCheckpoints(2000L);
        ////restart strategy
        //env.setRestartStrategy(RestartStrategies.failureRateRestart(3, Time.days(30),Time.seconds(3L)));
        ////state backend + checkpoint storage on HDFS
        //env.setStateBackend(new HashMapStateBackend());
        //env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop001:8020/edu/ck");
        ////user the job uses to access Hadoop
        //System.setProperty("HADOOP_USER_NAME","atguigu");
        //TODO 3: read business data from Kafka
        String topic = "topic_db";
        String groupId = "dim_app_group";
        KafkaSource<String> kafkaSource = MyKafkaUtil.getKafkaSource(topic, groupId);
        //TODO 4: turn the Kafka source into a stream (no event-time watermarks needed)
        DataStreamSource<String> sourceDs
                = env.fromSource(kafkaSource, WatermarkStrategy.noWatermarks(), "kafka_source");
        //sourceDs.print("==>");
        //TODO 5: simple ETL — parse each record into a JSONObject, dropping records
        //that are not valid JSON.
        //FIX(review): the original let JSON.parseObject throw an uncaught
        //JSONException, so a single malformed record would fail the whole task.
        SingleOutputStreamOperator<JSONObject> jsonObjDS = sourceDs.process(new ProcessFunction<String, JSONObject>() {
            @Override
            public void processElement(String jsonStr, Context context, Collector<JSONObject> collector) throws Exception {
                try {
                    JSONObject jsonObj = JSON.parseObject(jsonStr);
                    if (jsonObj == null) {
                        //fastjson returns null for null/empty input — treat as dirty data
                        return;
                    }
//                    String type = jsonObj.getString("type");
//                    if (!"bootstrap-start".equals(type) && !"bootstrap-complete".equals(type)) {
                        collector.collect(jsonObj);
//                    }
                } catch (JSONException e) {
                    //dirty data: not parseable as JSON — drop the record, keep the job alive
                    System.err.println("dimApp: dropping malformed record: " + jsonStr);
                }
            }

        });
        //jsonObjDS.print("==>");
        //TODO 6: read the dimension configuration table with Flink CDC
        Properties ps = new Properties();
        ps.setProperty("useSSL", "false");
        //NOTE(review): credentials are hard-coded — move to external configuration/secrets
        MySqlSource<String> mysqlSource = MySqlSource.<String>builder()
                .jdbcProperties(ps)
                .port(3306)
                .hostname("hadoop001")
                .databaseList("edu_config")
                .tableList("edu_config.table_process")
                .username("root")
                .password("000000")
                //initial(): take a full snapshot first, then follow the binlog
                .startupOptions(StartupOptions.initial())
                .deserializer(new JsonDebeziumDeserializationSchema())
                .build();
        DataStreamSource<String> mysqlDs
                = env.fromSource(mysqlSource, WatermarkStrategy.noWatermarks(), "mysqlDs");
        //TODO 7: join the configuration stream with the Kafka business stream
        //mysqlDs.print();
        //7.1 broadcast the configuration stream; the map state holds one TableProcess
        //entry per configured table (keying is done inside TableProcessFunction)
        MapStateDescriptor<String, TableProcess> mapStateDescriptor = new MapStateDescriptor<String, TableProcess>("mapState", String.class, TableProcess.class);
        BroadcastStream<String> broadcastDs = mysqlDs.broadcast(mapStateDescriptor);
        BroadcastConnectedStream<JSONObject, String> connect = jsonObjDS.connect(broadcastDs);
        //TODO 8: filter/shape dimension records according to the broadcast config
        SingleOutputStreamOperator<JSONObject> processDs = connect.process(new TableProcessFunction(mapStateDescriptor));

        //processDs.print();
        //TODO 9: write dimension data to HBase (Phoenix)
        processDs.addSink(new DimSinkFunction());
        env.execute();
    }
}
