package com.atguigu.realtime.app.dim;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.realtime.beans.TableProcess;
import com.atguigu.realtime.common.GmallConfig;
import com.atguigu.realtime.func.DimSinkFunction;
import com.atguigu.realtime.func.TableProcessFunction;
import com.atguigu.realtime.utils.HbaseUtil;
import com.atguigu.realtime.utils.MyKafkaUtil;
import com.ververica.cdc.connectors.mysql.source.MySqlSource;
import com.ververica.cdc.connectors.mysql.table.StartupOptions;
import com.ververica.cdc.debezium.JsonDebeziumDeserializationSchema;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.streaming.api.datastream.BroadcastConnectedStream;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.util.Collector;
import org.apache.hadoop.hbase.client.Connection;

import java.util.Properties;

/**
 * @author: 洛尘
 * @since: 2023-09-25 23:38
 * @description: Dimension-layer Flink job: consumes business change data from Kafka,
 *               reads dim-table routing config from MySQL via Flink CDC, broadcasts the
 *               config, and sinks matched dimension rows into HBase.
 **/
public class DimApp {

    /**
     * Entry point. Builds and runs the dimension-loading pipeline:
     * <ol>
     *   <li>Read Maxwell change records from the Kafka topic {@code topic_db} and ETL them
     *       (drop malformed JSON and bootstrap control records).</li>
     *   <li>Read the dim-routing config table via Flink MySQL CDC, map each change record
     *       to a {@link TableProcess} bean, and pre-create/drop the target HBase tables.</li>
     *   <li>Broadcast the config stream, connect it with the main stream, and filter/route
     *       dimension data with {@link TableProcessFunction}.</li>
     *   <li>Sink the resulting dimension rows to HBase via {@link DimSinkFunction}.</li>
     * </ol>
     *
     * @param args unused
     * @throws Exception if the Flink job fails; propagated so a failed job does not
     *                   exit with a success status (the previous catch-and-print
     *                   swallowed execution failures)
     */
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(4);
        // Checkpointing intentionally disabled for local runs; enable for production:
        // env.enableCheckpointing(5000L, CheckpointingMode.EXACTLY_ONCE);

        // -------------------------------------------------------------------------
        // 1. Consume business change data from Kafka.
        // -------------------------------------------------------------------------
        String topic = "topic_db";
        String groupId = "dim_app_group";
        KafkaSource<String> kafkaSource = MyKafkaUtil.getKafkaSource(topic, groupId);
        SingleOutputStreamOperator<String> kafkaDs = env.fromSource(kafkaSource,
                WatermarkStrategy.noWatermarks(), "kafkaDs");

        // -------------------------------------------------------------------------
        // 2. Light ETL: jsonStr -> JSONObject. Drops records that fail to parse and
        //    Maxwell's bootstrap control records (type = bootstrap-start/-complete),
        //    which carry no row data.
        // -------------------------------------------------------------------------
        SingleOutputStreamOperator<JSONObject> jsonObjDS = kafkaDs.process(
                new ProcessFunction<String, JSONObject>() {
                    @Override
                    public void processElement(String jsonStr, Context context, Collector<JSONObject> collector) throws Exception {
                        try {
                            JSONObject jsonObj = JSON.parseObject(jsonStr);
                            String type = jsonObj.getString("type");
                            if (!"bootstrap-start".equals(type) && !"bootstrap-complete".equals(type)) {
                                collector.collect(jsonObj);
                            }
                        } catch (Exception e) {
                            // Best-effort ETL: malformed records are dropped rather than
                            // failing the job. TODO(review): replace printStackTrace with a
                            // real logger or a side output for dirty data.
                            e.printStackTrace();
                        }
                    }
                }
        );

        // -------------------------------------------------------------------------
        // 3. Read the routing config table with Flink MySQL CDC.
        // -------------------------------------------------------------------------
        Properties properties = new Properties();
        // useSSL=false: plain connection for local/private networks — avoids TLS
        // handshake/transfer overhead where encryption is not required.
        properties.setProperty("useSSL", "false");
        MySqlSource<String> mySqlSource = MySqlSource.<String>builder()
                .hostname("hadoop105")
                .port(3306)
                .databaseList("gmall0417_config")
                .tableList("gmall0417_config.table_process_dim")
                .username("root")
                .password("000000")
                .jdbcProperties(properties)
                .serverTimeZone("Asia/Shanghai")
                .deserializer(new JsonDebeziumDeserializationSchema())
                // Startup position options:
                //   StartupOptions.initial()        - snapshot existing rows, then tail the binlog
                //   StartupOptions.latest()         - only new binlog changes from now on
                //   StartupOptions.specificOffset() - resume from a given binlog offset
                // initial() is required here so existing config rows are loaded on startup.
                .startupOptions(StartupOptions.initial())
                .build();
        // Debezium-style JSON arrives with op = r (snapshot read), c (insert),
        // u (update: before + after images), d (delete: before image only).
        SingleOutputStreamOperator<String> mySqlSourceDs = env
                .fromSource(mySqlSource, WatermarkStrategy.noWatermarks(), "mySqlSource");

        // -------------------------------------------------------------------------
        // 4. Map each CDC record to a TableProcess config bean (jsonStr -> bean).
        // -------------------------------------------------------------------------
        SingleOutputStreamOperator<TableProcess> tableProcessDS = mySqlSourceDs.map(
                new MapFunction<String, TableProcess>() {
                    @Override
                    public TableProcess map(String jsonStr) throws Exception {
                        JSONObject jsonObj = JSON.parseObject(jsonStr);
                        // Operation type decides which row image holds the config:
                        // deletes only carry "before"; everything else carries "after".
                        String op = jsonObj.getString("op");
                        TableProcess tableProcess = "d".equals(op)
                                ? jsonObj.getObject("before", TableProcess.class)
                                : jsonObj.getObject("after", TableProcess.class);
                        if (tableProcess == null) {
                            // Fail with context instead of a bare NPE on setOp below
                            // (e.g. a tombstone record with no row image).
                            throw new IllegalStateException(
                                    "CDC record carries no usable row image: " + jsonStr);
                        }
                        tableProcess.setOp(op);
                        return tableProcess;
                    }
                }
        );
        tableProcessDS.print(">>");

        // -------------------------------------------------------------------------
        // 5. Apply config DDL to HBase: create/drop the sink tables ahead of data.
        // -------------------------------------------------------------------------
        tableProcessDS = tableProcessDS.process(
                new ProcessFunction<TableProcess, TableProcess>() {
                    // One HBase connection per subtask, tied to the operator lifecycle.
                    private Connection conn;

                    @Override
                    public void open(Configuration parameters) throws Exception {
                        conn = HbaseUtil.getHbaseConnection();
                    }

                    @Override
                    public void close() throws Exception {
                        HbaseUtil.closeHbaseConnection(conn);
                    }

                    @Override
                    public void processElement(TableProcess tableProcess, Context context, Collector<TableProcess> collector) throws Exception {
                        String op = tableProcess.getOp();
                        String sinkTable = tableProcess.getSinkTable();
                        // NOTE(review): this passes the sink_columns list as HBase column
                        // families, yet the config rows also carry a sink_family field
                        // ("info") — verify whether getSinkFamily() should be used here.
                        String[] families = tableProcess.getSinkColumns().split(",");
                        if ("r".equals(op) || "c".equals(op)) {
                            // Snapshot read or insert -> create the table.
                            HbaseUtil.createHbaseTable(conn, GmallConfig.HBASE_NAMESPACE, sinkTable, families);
                        } else if ("u".equals(op)) {
                            // Update -> drop then recreate with the new schema.
                            HbaseUtil.dropHbaseTable(conn, GmallConfig.HBASE_NAMESPACE, sinkTable);
                            HbaseUtil.createHbaseTable(conn, GmallConfig.HBASE_NAMESPACE, sinkTable, families);
                        } else {
                            // Delete -> drop the table.
                            HbaseUtil.dropHbaseTable(conn, GmallConfig.HBASE_NAMESPACE, sinkTable);
                        }
                        collector.collect(tableProcess);
                    }
                }
        );

        // -------------------------------------------------------------------------
        // 6. Broadcast the config stream and connect it with the main stream.
        // -------------------------------------------------------------------------
        MapStateDescriptor<String, TableProcess> mapStateDescriptor =
                new MapStateDescriptor<>("mapStateDescriptor", String.class, TableProcess.class);
        BroadcastStream<TableProcess> broadcastDs = tableProcessDS.broadcast(mapStateDescriptor);
        BroadcastConnectedStream<JSONObject, TableProcess> connectDS = jsonObjDS.connect(broadcastDs);
        SingleOutputStreamOperator<JSONObject> dimDS = connectDS.process(
                new TableProcessFunction(mapStateDescriptor)
        );

        // -------------------------------------------------------------------------
        // 7. Sink dimension rows to HBase.
        // -------------------------------------------------------------------------
        dimDS.print(">>>");
        dimDS.addSink(
                new DimSinkFunction()
        );

        // Propagate failures instead of swallowing them with printStackTrace so the
        // process exit status reflects the job outcome.
        env.execute();
    }

}