package cn.kgc.gmall.cdc;

import com.alibaba.fastjson.JSONObject;
import com.alibaba.ververica.cdc.connectors.mysql.MySQLSource;
import com.alibaba.ververica.cdc.connectors.mysql.table.StartupOptions;
import com.alibaba.ververica.cdc.debezium.DebeziumDeserializationSchema;
import io.debezium.data.Envelope;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.source.SourceFunction;
import org.apache.flink.util.Collector;
import org.apache.kafka.connect.data.Field;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord;



/**
 * Flink-CDC DataStream job: snapshots a MySQL table, then tails its binlog,
 * emitting each change event as a flattened JSON string.
 */
public class Flink_CDC_Stream {
    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        //TODO 2. Enable checkpointing. Flink-CDC stores the binlog read position as state in the
        // checkpoint; to resume from where it left off the job must be restarted from a
        // Checkpoint or Savepoint.
        //2.1 Checkpoint every 5 seconds with exactly-once semantics
        env.enableCheckpointing(5000L, CheckpointingMode.EXACTLY_ONCE);
        //2.2 Checkpoint timeout: 1 minute
        env.getCheckpointConfig().setCheckpointTimeout(60000);
        //2.3 Restart strategy: at most 2 attempts, 2 s apart, recovering from the checkpoint
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(2, 2000L));
        //2.4 Retain the last checkpoint when the job is cancelled
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        //2.5 Set the HDFS user BEFORE configuring the HDFS state backend so that every
        // checkpoint write to HDFS is performed as this user
        System.setProperty("HADOOP_USER_NAME", "atkgc");
        //2.6 State backend: checkpoints go to HDFS
        env.setStateBackend(new FsStateBackend("hdfs://hadoop102:8020/flinkCDC"));


        // Builder pattern: ClassName.builder()...build()
        SourceFunction<String> sourceFunction = MySQLSource.<String>builder()
                .hostname("hadoop102")
                .port(3306)
                // Databases to capture (several may be listed)
                .databaseList("gmall_2022_realtime")
                // Tables to capture, database-qualified
                .tableList("gmall_2022_realtime.user")
                .username("root")
                .password("root")
                // Startup mode:
                //   initial  - take a full snapshot of the table first, then follow the binlog
                //   earliest - only replay what is already in the binlog
                // Combined with retained checkpoints this gives resumable ingestion.
                .startupOptions(StartupOptions.initial())
                // Custom deserializer: SourceRecord -> flat JSON string
                .deserializer(new MyDeserializer())
                .build();

        DataStreamSource<String> stringDS = env.addSource(sourceFunction);
        stringDS.print();

        env.execute("Print MySQL Snapshot + Binlog");
    }

    /**
     * Converts a Debezium {@code SourceRecord} into a flat JSON string of the form
     * {@code {"database":..., "table":..., "type":..., "ts":..., "data":{col:value,...}}}.
     */
    public static class MyDeserializer implements DebeziumDeserializationSchema<String> {

        /**
         * Deserializes one change event. A record value looks like:
         * <pre>
         * ConnectRecord{
         *     value=Struct{
         *         after=Struct{ name=..., age=... },
         *         source=Struct{ db=gmall_2022_realtime, table=user },
         *         op=c,
         *         ts_ms=1662003236731
         *     }
         * }
         * </pre>
         * @param sourceRecord the raw Debezium record
         * @param collector    receives the rendered JSON string
         * @throws Exception if the record cannot be decoded
         */
        @Override
        public void deserialize(SourceRecord sourceRecord, Collector<String> collector) throws Exception {
            // The record value is always a Kafka Connect Struct for Debezium events
            Struct value = (Struct) sourceRecord.value();
            // "source" struct carries origin metadata: database and table name
            Struct source = value.getStruct("source");
            String db = source.getString("db");
            String table = source.getString("table");
            // "after" is the row image after the change; it is null for delete events
            Struct after = value.getStruct("after");
            // Operation type; normalize Debezium's "create" to the conventional "insert"
            String type = Envelope.operationFor(sourceRecord).toString().toLowerCase();
            if ("create".equals(type)) {
                type = "insert";
            }
            // Event timestamp in epoch milliseconds
            Long ts_ms = value.getInt64("ts_ms");
            // Assemble the envelope JSON
            JSONObject jsonObject = new JSONObject();
            jsonObject.put("database", db);
            jsonObject.put("table", table);
            jsonObject.put("type", type);
            jsonObject.put("ts", ts_ms);

            JSONObject dataJson = new JSONObject();
            // Copy every column name/value pair out of the "after" struct dynamically
            if (after != null) {
                Schema schema = after.schema();
                for (Field field : schema.fields()) {
                    String name = field.name();
                    // Nullable columns may hold null — guard before toString() to avoid an NPE
                    Object fieldValue = after.get(field);
                    dataJson.put(name, fieldValue == null ? null : fieldValue.toString());
                }
            }
            // Attach the row payload to the parent JSON object
            jsonObject.put("data", dataJson);

            collector.collect(jsonObject.toJSONString());
        }


        @Override
        public TypeInformation<String> getProducedType() {
            return TypeInformation.of(String.class);
        }

    }
}
