package cn.dmrliu.edu.realtime.app.dwd.db;

import cn.dmrliu.edu.realtime.app.func.DwdTableProcessFunction;
import cn.dmrliu.edu.realtime.bean.TableProcess;
import cn.dmrliu.edu.realtime.util.KafkaUtil;
import com.alibaba.fastjson.JSONObject;
import com.ververica.cdc.connectors.mysql.source.MySqlSource;
import com.ververica.cdc.connectors.mysql.table.StartupOptions;
import com.ververica.cdc.debezium.JsonDebeziumDeserializationSchema;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.streaming.api.datastream.BroadcastConnectedStream;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.util.Collector;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.nio.charset.StandardCharsets;
import java.util.Properties;


/**
 * 用户注册
 * 课程评价
 * 用户收藏
 */
public class DwdBaseDB {
    /**
     * DWD base-db splitting job.
     *
     * <p>Reads change-log records from the Kafka {@code edu_db} topic, filters out
     * Maxwell bootstrap records, connects the data stream with a broadcast
     * configuration stream (table {@code edu_config.table_process}, captured via
     * MySQL CDC), and routes each matching record to the Kafka topic named by its
     * {@code sink_table} field. Covers fact data such as user registration
     * (用户注册), course review (课程评价) and user favorites (用户收藏).
     *
     * @param args unused
     * @throws Exception if the Flink job fails to submit or execute
     */
    public static void main(String[] args) throws Exception {
        // TODO 1. Prepare the execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(4);

        // TODO 2. Checkpointing is not configured yet.
        // NOTE(review): without checkpoints this job has no fault-tolerance /
        // exactly-once guarantees — enable before running in production.

        // TODO 3. Read the db topic from Kafka as the main data stream
        String topic = "edu_db";
        String group = "edu_dwd_base_db_group";
        KafkaSource<String> kafkaSource = KafkaUtil.getKafkaSource(topic, group);
        DataStreamSource<String> kafkaDS =
                env.fromSource(kafkaSource, WatermarkStrategy.noWatermarks(), "kafka source");

        // TODO 4. ETL: drop malformed records and Maxwell bootstrap records,
        // converting the surviving payloads to JSONObject
        SingleOutputStreamOperator<JSONObject> etlJsonObjectDS = kafkaDS.process(new ProcessFunction<String, JSONObject>() {
            @Override
            public void processElement(String jsonStr, Context context, Collector<JSONObject> out) throws Exception {
                try {
                    JSONObject jsonObject = JSONObject.parseObject(jsonStr);
                    String type = jsonObject.getString("type");
                    // Maxwell emits bootstrap-* records during historical sync;
                    // only real insert/update/delete change events pass through.
                    if (!"bootstrap-start".equals(type) &&
                            !"bootstrap-complete".equals(type) &&
                            !"bootstrap-insert".equals(type)) {
                        out.collect(jsonObject);
                    }
                } catch (Exception e) {
                    // Best-effort dirty-data filter: unparseable records are dropped.
                    // NOTE(review): replace printStackTrace with a proper logger and/or
                    // a side output for dirty data.
                    e.printStackTrace();
                }
            }
        });

        // TODO 5. Build the configuration stream from MySQL CDC and broadcast it
        Properties properties = new Properties();
        properties.setProperty("useSSL", "false");
        // NOTE(review): credentials are hardcoded — externalize to configuration
        // or a secrets manager before production use.
        MySqlSource<String> mySqlSource = MySqlSource.<String>builder()
                .hostname("h102")
                .port(3306)
                .username("root")
                .password("j)k*x~y0)*n_L)!o{y?C`w(1rV_viw")
                .jdbcProperties(properties)
                .databaseList("edu_config")
                .tableList("edu_config.table_process")
                // initial(): snapshot existing config rows first, then stream changes
                .startupOptions(StartupOptions.initial())
                .deserializer(new JsonDebeziumDeserializationSchema())
                .build();
        DataStreamSource<String> mysqlDS =
                env.fromSource(mySqlSource, WatermarkStrategy.noWatermarks(), "mysql source");
        MapStateDescriptor<String, TableProcess> mapStateDescriptor =
                new MapStateDescriptor<>("dwd_base_db_table_process", String.class, TableProcess.class);
        BroadcastStream<String> broadcastDS = mysqlDS.broadcast(mapStateDescriptor);

        // TODO 6. Connect the main stream with the broadcast config stream and
        // let DwdTableProcessFunction route/filter records per the config table
        BroadcastConnectedStream<JSONObject, String> connectDS = etlJsonObjectDS.connect(broadcastDS);
        SingleOutputStreamOperator<JSONObject> processDS =
                connectDS.process(new DwdTableProcessFunction(mapStateDescriptor));

//        processDS.print("合并处理和数据+++++++++");

        // TODO 7. Sink each record to the Kafka topic named by its sink_table field
        processDS.sinkTo(KafkaUtil.getKafkaSinkBySchema(new KafkaRecordSerializationSchema<JSONObject>() {
            @Override
            public ProducerRecord<byte[], byte[]> serialize(JSONObject jsonObject, KafkaSinkContext kafkaSinkContext, Long aLong) {
                String sinkTable = jsonObject.getString("sink_table");
                // sink_table is routing metadata only — strip it from the payload
                jsonObject.remove("sink_table");
                // Fix: encode explicitly as UTF-8; the no-arg getBytes() uses the
                // platform default charset and can corrupt non-ASCII payloads.
                return new ProducerRecord<>(sinkTable, jsonObject.toString().getBytes(StandardCharsets.UTF_8));
            }
        }));

        env.execute();

    }
}
