package com.atguigu.realtime.app.dwd.db;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.realtime.beans.BaseDbTableProcess;
import com.atguigu.realtime.func.BaseDbTableProcessFunction;
import com.atguigu.realtime.utils.MyKafkaUtil;
import com.ververica.cdc.connectors.mysql.source.MySqlSource;
import com.ververica.cdc.connectors.mysql.table.StartupOptions;
import com.ververica.cdc.debezium.JsonDebeziumDeserializationSchema;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.streaming.api.datastream.BroadcastConnectedStream;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction;
import org.apache.flink.util.Collector;
import org.apache.kafka.clients.producer.ProducerRecord;

import javax.annotation.Nullable;
import java.nio.charset.StandardCharsets;

/**
 * @author: 洛尘
 * @since: 2023-10-08 18:41
 * @description:
 *  Dynamic routing (stream splitting) for fact tables whose processing logic is simple.
 *  Processes that must be running:
 *      zk, kafka, maxwell, BaseDbApp
 **/
public class BaseDbApp {
    public static void main(String[] args) {
        // 1. Environment preparation
        // 1.1 Create the stream execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // 1.2 Set parallelism
        env.setParallelism(4);
        // 2. Checkpoint configuration (omitted)

        // 3. Read business change-log data from Kafka (written by Maxwell)
        // 3.1 Topic and consumer group
        String topic = "topic_db";
        String groupId = "base_db_group";
        // 3.2 Build the consumer
        KafkaSource<String> kafkaSource = MyKafkaUtil.getKafkaSource(topic, groupId);
        // 3.3 Consume into a stream
        SingleOutputStreamOperator<String> kafkaStrDS =
                env.fromSource(kafkaSource, WatermarkStrategy.noWatermarks(), "kafka_source");

        // 4. Simple ETL: jsonStr -> JSONObject.
        //    Drops malformed records and Maxwell "bootstrap-*" records
        //    (historical dimension-sync data, not incremental facts).
        SingleOutputStreamOperator<JSONObject> jsonDs = kafkaStrDS.process(
                new ProcessFunction<String, JSONObject>() {
                    @Override
                    public void processElement(String jsonStr, Context context, Collector<JSONObject> collector) throws Exception {
                        try {
                            JSONObject jsonObj = JSON.parseObject(jsonStr);
                            String type = jsonObj.getString("type");
                            // Explicit null guard: a record without a "type" field is
                            // dropped directly instead of triggering an NPE that the
                            // catch block would silently swallow.
                            if (type != null && !type.startsWith("bootstrap-")) {
                                collector.collect(jsonObj);
                            }
                        } catch (Exception e) {
                            // Best-effort: skip dirty records, but leave a trace.
                            e.printStackTrace();
                        }
                    }
                }
        );
//        jsonDs.print(">>>");

        // 5. Read the dynamic-routing configuration table via Flink CDC
        // 5.1 Build the MySQL CDC source
        // NOTE(review): credentials are hardcoded — move them to external
        // configuration (args/env/properties) before production use.
        MySqlSource<String> mySqlSource = MySqlSource.<String>builder()
                .hostname("hadoop105")
                .port(3306)
                .databaseList("gmall0417_config")
                .tableList("gmall0417_config.table_process_dwd")
                .username("root")
                .password("000000")
                // initial(): snapshot the existing config rows, then stream changes.
                .startupOptions(StartupOptions.initial())
                .deserializer(new JsonDebeziumDeserializationSchema())
                .build();
        // 5.2 Read the config rows into a stream
        SingleOutputStreamOperator<String> myStrDS =
                env.fromSource(mySqlSource, WatermarkStrategy.noWatermarks(), "mysql_source");
//        myStrDS.print("...");

        // 6. Broadcast the configuration stream (keyed by source table in the
        //    broadcast state, value = routing config for that table)
        MapStateDescriptor<String, BaseDbTableProcess> mapStateDescriptor =
                new MapStateDescriptor<>("mapStateDescriptor", String.class, BaseDbTableProcess.class);
        BroadcastStream<String> broadcastDS = myStrDS.broadcast(mapStateDescriptor);

        // 7. Connect the business stream with the broadcast config stream
        BroadcastConnectedStream<JSONObject, String> connectDS = jsonDs.connect(broadcastDS);

        // 8. Route each business record according to the broadcast config;
        //    the function also attaches a "sink_table" field naming the target topic.
        SingleOutputStreamOperator<JSONObject> realDS = connectDS.process(
                new BaseDbTableProcessFunction(mapStateDescriptor));

        // 9. Write each routed fact record to its own Kafka topic
        realDS.sinkTo(MyKafkaUtil.getKafkaSinkBySchema(
                new KafkaRecordSerializationSchema<JSONObject>() {
                    @Nullable
                    @Override
                    public ProducerRecord<byte[], byte[]> serialize(JSONObject jsonObject, KafkaSinkContext kafkaSinkContext, Long aLong) {
                        // Example payload:
                        // {"create_time":"2023-09-23 11:42:51","user_id":1471,"sku_id":21,"sink_table":"dwd_interaction_favor_add","id":1608723429735806459,"ts":1696750795}
                        // "sink_table" was injected in step 8 solely to pick the target
                        // topic here; it must not be written into the topic itself.
                        // assumes BaseDbTableProcessFunction always sets "sink_table" — TODO confirm
                        String sinkTopic = jsonObject.getString("sink_table");
                        jsonObject.remove("sink_table");
                        return new ProducerRecord<byte[], byte[]>(sinkTopic,
                                jsonObject.toString().getBytes(StandardCharsets.UTF_8));
                    }
                }
        ));

        try {
            env.execute();
        } catch (Exception e) {
            // Fail fast with a non-zero exit code instead of silently swallowing
            // a job-submission/execution failure (the original printStackTrace()
            // let the process exit successfully even when the job never ran).
            throw new IllegalStateException("BaseDbApp job execution failed", e);
        }
    }
}