package com.lsx143.realtime.app.dwd;

import com.alibaba.fastjson.JSONObject;
import com.lsx143.realtime.app.BaseApp;
import com.lsx143.realtime.bean.TableProcess;
import com.lsx143.realtime.common.Constants;
import com.lsx143.realtime.sink.PhoenixSink;
import com.lsx143.realtime.util.KafkaUtil;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.state.BroadcastState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.*;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.util.Collector;
import org.apache.flink.util.OutputTag;

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;

/**
 * ODS -> DWD business-data processing app.
 */
public class DWDDbApp extends BaseApp {

    public static void main(String[] args) {
        // init(port, jobName, parallelism, consumerGroup, sourceTopic)
        // — exact parameter semantics are defined by BaseApp.init; TODO confirm there
        new DWDDbApp().init(10002,
                "DWDDbApp",
                1,
                "DWDDbApp",
                Constants.TOPIC_ODS_DB);
    }

    /**
     * Business-data processing pipeline (ODS -> DWD).
     *
     * @param env       the Flink execution environment
     * @param srcStream the raw source stream (change records as JSON strings)
     */
    @Override
    protected void run(StreamExecutionEnvironment env, DataStreamSource<String> srcStream) {
        System.out.println("【DWDDbApp】启动");
        //1. ETL: parse raw records to JSON and drop unusable ones
        SingleOutputStreamOperator<JSONObject> etlStream = etl(srcStream);
        //2. Read the routing-configuration table (via MySQL CDC)
        SingleOutputStreamOperator<TableProcess> tbStream = readProcessTable(env);
        //3. Dynamic routing driven by the configuration table
        //  3.1) broadcast the config stream and connect it with the data stream
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> connectedStream = connectStream(etlStream, tbStream);
//        connectedStream.print();
        //  3.2) drop columns that are not configured to be sunk
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> filterStream = filterColumn(connectedStream);
//        filterStream.print();

        //  3.3) split into a Kafka-bound and an HBase-bound stream
        HashMap<String, DataStream<Tuple2<JSONObject, TableProcess>>> mapStream = splitStream(filterStream);
//        mapStream.get(Constants.SINK_TYPE_HBASE).print("");
//        mapStream.get(Constants.SINK_TYPE_KAFKA).print("");

        //4. Sink the two streams to Kafka and HBase
        sinkData(mapStream);
    }


    /**
     * ETL for the incoming data stream: parse each record to JSON and keep only
     * usable insert/update change events.
     *
     * @param srcStream the raw business-data stream
     * @return the cleaned stream of JSON change records
     */
    private SingleOutputStreamOperator<JSONObject> etl(DataStreamSource<String> srcStream) {
        return srcStream
                //1. Maxwell full-sync records use types like "bootstrap-insert"; strip the
                //   "bootstrap-" prefix so they are handled as ordinary insert/update events.
                //   NOTE(review): replaceAll runs over the entire JSON string, so a data
                //   value containing "bootstrap-" would also be rewritten — confirm acceptable.
                .map(data -> JSONObject.parseObject(data.replaceAll("bootstrap-", "")))
                //2. Filter: must have database, table, an insert/update type, and a
                //   non-trivial "data" payload (an empty object "{}" is length 2, so
                //   length > 3 rejects empty/near-empty payloads)
                .filter(obj -> obj.getString("database") != null &&
                        obj.getString("table") != null &&
                        ("insert".equals(obj.getString("type")) || "update".equals(obj.getString("type"))) &&
                        obj.getString("data") != null &&
                        obj.getString("data").length() > 3);
    }

    /**
     * Reads the dynamic routing-configuration table from MySQL via Flink SQL CDC.
     *
     * @param env the Flink execution environment
     * @return a stream of {@code TableProcess} configuration rows
     */
    private SingleOutputStreamOperator<TableProcess> readProcessTable(StreamExecutionEnvironment env) {
        System.out.println("readProcessTable");
        //1. Create the table environment
        final StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);
        //2. Map a dynamic table onto the MySQL source table via the mysql-cdc connector
        //   NOTE(review): DB credentials are hard-coded in plain text — move to external config
        tEnv
                .executeSql("CREATE TABLE `table_process`( " +
                        "   `source_table`  string, " +
                        "   `operate_type`  string, " +
                        "   `sink_type`  string, " +
                        "   `sink_table`  string, " +
                        "   `sink_columns` string, " +
                        "   `sink_pk`  string, " +
                        "   `sink_extend`  string, " +
                        "   PRIMARY KEY (`source_table`,`operate_type`)  NOT ENFORCED" +
                        ")with(" +
                        "   'connector' = 'mysql-cdc', " +
                        "   'hostname' = 'hadoop162', " +
                        "   'port' = '3306', " +
                        "   'username' = 'root', " +
                        "   'password' = 'aaaaaa', " +
                        "   'database-name' = 'gmall2021_realtime', " +
                        "   'table-name' = 'table_process'," +
                        // on startup, read a full snapshot of the MySQL table,
                        // then continue with incremental/update changes
                        "   'debezium.snapshot.mode' = 'initial' " +
                        ")"
                );

        //3. Column names differ from the bean's field names; alias them to match TableProcess
        final Table table = tEnv.sqlQuery("select " +
                "  source_table sourceTable, " +
                "  sink_type sinkType, " +
                "  operate_type operateType, " +
                "  sink_table sinkTable, " +
                "  sink_columns sinkColumns, " +
                "  sink_pk sinkPk, " +
                "  sink_extend sinkExtend " +
                "from table_process ");

        //4. toRetractStream emits (isAdd, row) pairs; drop retractions (f0 == false)
        //   so only the latest state of each config row flows downstream
        return tEnv
                .toRetractStream(table, TableProcess.class)
                .filter(t -> t.f0)
                .map(t -> t.f1);
    }

    /**
     * Broadcasts the configuration stream and connects the business-data stream to it,
     * pairing each data record with its matching routing config.
     *
     * @param dbStream business-data stream
     * @param tbStream configuration-table stream
     * @return the connected stream of (data payload, matching config) pairs
     */
    private SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> connectStream(
            SingleOutputStreamOperator<JSONObject> dbStream,
            SingleOutputStreamOperator<TableProcess> tbStream) {
        //1. Turn the configuration stream into a broadcast stream
        //  1.1) broadcast-state descriptor (key format: "sourceTable:operateType")
        MapStateDescriptor<String, TableProcess> bcStateDes =
                new MapStateDescriptor<>("bcState", String.class, TableProcess.class);
        //  1.2) build the broadcast stream
        BroadcastStream<TableProcess> bcStream = tbStream.broadcast(bcStateDes);

        //2. Join the data stream against the broadcast state
        return dbStream
                .connect(bcStream)
                .process(new BroadcastProcessFunction<JSONObject,
                        TableProcess,
                        Tuple2<JSONObject, TableProcess>>() {
                    @Override
                    //1. Handle a business-data record
                    public void processElement(JSONObject dbJsonOBJ,
                                               ReadOnlyContext ctx,
                                               Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                        //1. Look up the config with key "sourceTable:operation"
                        String key = dbJsonOBJ.getString("table") + ":" + dbJsonOBJ.getString("type");
                        TableProcess tp = ctx.getBroadcastState(bcStateDes).get(key);
                        //2. Emit only the "data" payload (no metadata); records with no
                        //   matching config entry are silently dropped here
                        if (tp != null) {
                            out.collect(new Tuple2<>(dbJsonOBJ.getJSONObject("data"), tp));
                        }
                    }

                    //2. Handle a broadcast (configuration) record
                    @Override
                    public void processBroadcastElement(TableProcess tp,
                                                        Context ctx,
                                                        Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                        //1. Build the key in the same "sourceTable:operation" format
                        String key = tp.getSourceTable() + ":" + tp.getOperateType();
                        //2. Store the config row into the broadcast state
                        BroadcastState<String, TableProcess> bcState = ctx.getBroadcastState(bcStateDes);
                        bcState.put(key, tp);
                    }
                });
    }

    /**
     * Removes the columns that are not configured to be sunk.
     * (The original javadoc said "rows" — the code mutates the JSON payload's
     * key set, i.e. it filters columns, keeping only those in sinkColumns.)
     *
     * @param connectedStream the connected (data, config) stream
     * @return the stream with only the configured columns retained
     */
    @SuppressWarnings("all")
    private SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> filterColumn(SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> connectedStream) {
        return connectedStream.map(new MapFunction<Tuple2<JSONObject, TableProcess>, Tuple2<JSONObject, TableProcess>>() {
            @Override
            public Tuple2<JSONObject, TableProcess> map(Tuple2<JSONObject, TableProcess> tuple) throws Exception {
                TableProcess tp = tuple.f1;
                JSONObject jsonObject = tuple.f0;
                // sinkColumns is a comma-separated whitelist; drop every key not in it
                List<String> columns = Arrays.asList(tp.getSinkColumns().split(","));
                jsonObject.keySet().removeIf(key -> !columns.contains(key));
                return tuple;
            }
        });
    }

    /**
     * Splits the stream by sink type.
     *
     * @param connectedStream the business-data stream
     * @return a HashMap holding the Kafka-bound and HBase-bound output streams
     */
    private HashMap<String, DataStream<Tuple2<JSONObject, TableProcess>>> splitStream(
            SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> connectedStream) {
        final OutputTag<Tuple2<JSONObject, TableProcess>> hbaseTag = new OutputTag<Tuple2<JSONObject, TableProcess>>("hbaseTag") {
        };
        //1. Split the stream
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> processStream = connectedStream.process(new ProcessFunction<Tuple2<JSONObject, TableProcess>,
                Tuple2<JSONObject, TableProcess>>() {

            //2. Main output carries Kafka-bound records; side output carries HBase-bound
            //   records. (The original comment stated the opposite of what the code does.)
            @Override
            public void processElement(Tuple2<JSONObject, TableProcess> value,
                                       Context ctx,
                                       Collector<Tuple2<JSONObject, TableProcess>> out) {
                //1. Determine the configured sink type
                String sinkType = value.f1.getSinkType();
                //2. Route the record
                //  2.1 kafka -> main output
                if (Constants.SINK_TYPE_KAFKA.equals(sinkType)) {
//                    System.out.println("kafka类型-主输出");
                    out.collect(value);
                } else if (Constants.SINK_TYPE_HBASE.equals(sinkType)) {
                    //  2.2 hbase -> side output
//                    System.out.println("hbase-侧输出");
                    ctx.output(hbaseTag, value);
                }
                // any other sinkType is silently dropped — presumably intentional; confirm
            }
        });
        //3. Extract the HBase side-output stream
        DataStream<Tuple2<JSONObject, TableProcess>> hbaseStream = processStream.getSideOutput(hbaseTag);
        //4. Collect both streams into a map keyed by sink type
        HashMap<String, DataStream<Tuple2<JSONObject, TableProcess>>> streamMap = new HashMap<>();
        streamMap.put(Constants.SINK_TYPE_KAFKA, processStream);
        streamMap.put(Constants.SINK_TYPE_HBASE, hbaseStream);
        System.out.println("分流完毕");
        return streamMap;
    }

    /**
     * Sinks the split streams to Kafka and HBase.
     *
     * @param streamMap map holding the Kafka-bound and HBase-bound streams
     */
    private void sinkData(HashMap<String, DataStream<Tuple2<JSONObject, TableProcess>>> streamMap) {
        //1. Retrieve the two streams from the map
        DataStream<Tuple2<JSONObject, TableProcess>> kafkaStream = streamMap.get(Constants.SINK_TYPE_KAFKA);
        DataStream<Tuple2<JSONObject, TableProcess>> hbaseStream = streamMap.get(Constants.SINK_TYPE_HBASE);
        System.out.println("sinkData");
        //2. Write out each stream
        sinkToKafka(kafkaStream);
        sinkToHbase(hbaseStream);
    }


    /**
     * Sinks the data stream to Kafka.
     *
     * @param kafkaStream the Kafka-bound stream
     */
    private void sinkToKafka(DataStream<Tuple2<JSONObject, TableProcess>> kafkaStream) {
        System.out.println("sinkToKafka");
        // KafkaUtil.getKafkaSink() supplies the Kafka producer sink
        kafkaStream.addSink(KafkaUtil.getKafkaSink());
    }

    /**
     * Sinks the data stream to HBase (via Phoenix).
     *
     * @param hbaseStream the HBase-bound stream
     */
    private void sinkToHbase(DataStream<Tuple2<JSONObject, TableProcess>> hbaseStream) {
        // keyBy the target table before writing so records for the same table land in
        // the same subtask:
        //1. avoids repeatedly opening/closing connections per table
        //2. the sink uses keyed state, which requires an upstream keyBy
        KeyedStream<Tuple2<JSONObject, TableProcess>, String> hbaseKeyedStream =
                hbaseStream.keyBy(t -> t.f1.getSinkTable());
        hbaseKeyedStream.addSink(new PhoenixSink());
    }
}