package com.atguigu.gmall.realtime.app.dwd;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.alibaba.ververica.cdc.connectors.mysql.MySQLSource;
import com.alibaba.ververica.cdc.connectors.mysql.table.StartupOptions;
import com.alibaba.ververica.cdc.debezium.DebeziumSourceFunction;
import com.atguigu.gmall.realtime.app.func.MyDeserializationSchemaFunction;
import com.atguigu.gmall.realtime.app.func.TableProcessFunction;
import com.atguigu.gmall.realtime.common.TableProcess;
import com.atguigu.gmall.realtime.utils.DimSink;
import com.atguigu.gmall.realtime.utils.MykafkaUtil;
import org.apache.flink.api.common.functions.FilterFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.streaming.api.datastream.*;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.KafkaSerializationSchema;
import org.apache.flink.util.OutputTag;
import org.apache.kafka.clients.producer.ProducerRecord;

import javax.annotation.Nullable;

import java.nio.charset.StandardCharsets;


/**
 * Dynamic splitting of business-database change data (DWD layer).
 *
 * <p>Reads raw change records from the ODS Kafka topic, filters out malformed
 * records, then routes each record according to a configuration table that is
 * read via Flink CDC and distributed to all subtasks as broadcast state:
 * dimension records go to a side output and are written to Phoenix, while
 * fact records stay on the main stream and are written back to per-table
 * Kafka DWD topics.
 */
public class BaseDBapp {
    public static void main(String[] args) throws Exception {
        // TODO 1. Set up the streaming environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(4);
        // Checkpointing / restart strategy / state backend — intentionally
        // disabled for local development; re-enable for cluster deployment.
//        env.enableCheckpointing(5000L, CheckpointingMode.EXACTLY_ONCE);
//        env.getCheckpointConfig().setCheckpointTimeout(6000L);
//        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
//        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 3000L));
//        env.setStateBackend(new FsStateBackend("hdfs://hadoop102:8020/gmall"));
        System.setProperty("HADOOP_USER_NAME", "atguigu");

        // TODO 2. Consume raw change records from Kafka (ODS layer).
        String topic = "ods_base_db_m";
        String groupId = "base_db_app_group2";
        DataStreamSource<String> kafkaSource = env.addSource(MykafkaUtil.getkafkaSource(topic, groupId));

        // Convert each record from a JSON string into a JSONObject.
        SingleOutputStreamOperator<JSONObject> kafkaDS = kafkaSource.map(
                new MapFunction<String, JSONObject>() {
                    @Override
                    public JSONObject map(String jsonStr) throws Exception {
                        return JSON.parseObject(jsonStr);
                    }
                }
        );

        // TODO 3. ETL: drop records that lack a table name or carry an
        // empty/trivial "data" payload.
        SingleOutputStreamOperator<JSONObject> filterDS = kafkaDS.filter(new FilterFunction<JSONObject>() {
            @Override
            public boolean filter(JSONObject jsonObj) throws Exception {
                // Hoist the repeated lookup; primitive boolean avoids boxing.
                String table = jsonObj.getString("table");
                return table != null
                        && table.length() > 0
                        && jsonObj.getJSONObject("data") != null
                        // the serialized "data" object must hold more than a
                        // near-empty payload (longer than "{}" plus a little)
                        && jsonObj.getString("data").length() > 5;
            }
        });
        filterDS.print(">>>>");

        // TODO 4. Read the routing-configuration table with Flink CDC.
        // 4.1 Build the MySQL CDC source.
        // NOTE(review): credentials are hard-coded — move to external config/secrets.
        DebeziumSourceFunction<String> sourceFunction = MySQLSource.<String>builder()
                .hostname("hadoop102")
                .port(3306)
                .username("root")
                .password("000000")
                .databaseList("gmall2021_realtime")
                .tableList("gmall2021_realtime.table_process")
                .startupOptions(StartupOptions.initial())   // full snapshot first, then binlog
                .deserializer(new MyDeserializationSchemaFunction())
                .build();
        // 4.2 Wrap the CDC source as a stream.
        DataStreamSource<String> mysqlDS = env.addSource(sourceFunction);
        // 4.3 Descriptor for the broadcast state holding the routing config.
        MapStateDescriptor<String, TableProcess> mapStateDescriptor =
                new MapStateDescriptor<>("table_process", String.class, TableProcess.class);
        // 4.4 Broadcast the configuration stream to every parallel subtask.
        BroadcastStream<String> broadcastDS = mysqlDS.broadcast(mapStateDescriptor);

        // TODO 5. Connect the main stream with the broadcast config stream.
        // 5.1 Connect.
        BroadcastConnectedStream<JSONObject, String> connectDS = filterDS.connect(broadcastDS);
        // 5.2 Side-output tag for dimension records (anonymous subclass keeps
        // the generic type reifiable for Flink).
        OutputTag<JSONObject> dimTag = new OutputTag<JSONObject>("dimTag") {
        };
        // 5.3 Split: dimension data -> side output, fact data -> main stream.
        SingleOutputStreamOperator<JSONObject> realDS =
                connectDS.process(new TableProcessFunction(dimTag, mapStateDescriptor));
        // 5.4 Extract the dimension side output.
        DataStream<JSONObject> dimDS = realDS.getSideOutput(dimTag);
        realDS.print("主流");
        dimDS.print("维度");

        // TODO 6. Persist dimension records to Phoenix.
        dimDS.addSink(new DimSink());

        // TODO 7. Write fact records back to Kafka DWD topics; the target
        // topic is chosen per record.
        realDS.addSink(
                MykafkaUtil.getKafkaSinkBySchema(new KafkaSerializationSchema<JSONObject>() {
                    @Override
                    public ProducerRecord<byte[], byte[]> serialize(JSONObject jsonObj, @Nullable Long timestamp) {
                        // TableProcessFunction attached the target topic as "sink_table".
                        String sinkTopic = jsonObj.getString("sink_table");
                        JSONObject dataJsonObj = jsonObj.getJSONObject("data");
                        // Explicit UTF-8: getBytes() without a charset uses the
                        // platform default and can corrupt non-ASCII payloads.
                        return new ProducerRecord<>(sinkTopic, dataJsonObj.toJSONString().getBytes(StandardCharsets.UTF_8));
                    }
                })
        );

        env.execute();
    }
}
