package com.atguigu.gmall.realtime.app.dwd;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.gmall.realtime.app.BaseAppV1;
import com.atguigu.gmall.realtime.base.TableProcess;
import com.atguigu.gmall.realtime.common.Constant;
import com.atguigu.gmall.realtime.util.FlinkSinkUtil;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.state.BroadcastState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.state.ReadOnlyBroadcastState;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.util.Collector;
import org.apache.flink.util.OutputTag;

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;


/*
 * @author liuyun
 * @title: DwdDbApp
 * @projectName gmal0924
 * @description: TODO
 * @date 2022-3-17 22:37
 */
public class DwdDbApp extends BaseAppV1 {
    public static void main(String[] args) {
        // Bootstrap the job: web port 2002, parallelism 1, checkpoint path and
        // consumer-group id both "DwdDbApp", consuming the ODS db topic.
        final int port = 2002;
        final int parallelism = 1;
        final String appName = "DwdDbApp";
        new DwdDbApp().init(port, parallelism, appName, appName, Constant.TOPIC_ODS_DB);
    }

    /**
     * Pipeline entry point for the DWD db job:
     * ETL -> read config table -> connect with broadcast config ->
     * filter columns -> dynamic split -> sink to Kafka / HBase.
     *
     * Leftover debug sinks ({@code stream.print()} and the "kafka"/"hbase"
     * prints) and commented-out debug code have been removed; they added
     * noisy stdout sinks to the production job graph.
     *
     * @param env    the Flink streaming environment
     * @param stream raw ods_db records (JSON strings) from Kafka
     */
    @Override
    protected void handle(StreamExecutionEnvironment env, DataStreamSource<String> stream) {
        // 1. ETL: keep only valid insert/update rows of the business database
        SingleOutputStreamOperator<JSONObject> etlStream = etl(stream);

        // 2. Read the dynamic-routing configuration table (via CDC)
        SingleOutputStreamOperator<TableProcess> tpStream = readTableProcess(env);

        // 3. Connect the data stream with the broadcast config stream
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> connectedStream =
                connectStreams(etlStream, tpStream);

        // 4. Drop columns not listed in the config's sink_columns
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> filterColumnsStream =
                filterColumns(connectedStream);

        // 5. Split: fact tables -> Kafka (main stream), dim tables -> HBase (side output)
        Tuple2<SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>>,
                DataStream<Tuple2<JSONObject, TableProcess>>> kafkaHbaseStream =
                dynamicSplitStream(filterColumnsStream);

        // 6. Route each stream to its sink
        writeToKafka(kafkaHbaseStream.f0);
        writeToHbase(kafkaHbaseStream.f1);
    }

    /**
     * Writes dimension-table records to HBase through the Phoenix sink.
     * Keying by the target table name groups all rows of one dim table on
     * the same subtask before the sink.
     *
     * @param stream pairs of (row data, routing config) destined for HBase
     */
    private void writeToHbase(DataStream<Tuple2<JSONObject, TableProcess>> stream) {
        stream
                .keyBy(pair -> pair.f1.getSink_table())
                .addSink(FlinkSinkUtil.getPhoenixSink());
    }

    /**
     * Writes fact-table records to Kafka.
     *
     * The leftover debug {@code stream.print()} was removed: it registered an
     * extra stdout sink on every record in production.
     *
     * @param stream pairs of (row data, routing config) destined for Kafka
     */
    private void writeToKafka(SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> stream) {
        stream.addSink(FlinkSinkUtil.getKafkaSink());
    }

    /**
     * Splits the routed records into their two destinations: fact-table rows
     * stay on the main stream (Kafka) and dimension-table rows leave through
     * a side output (HBase/Phoenix). Records whose sink type matches neither
     * constant are dropped.
     *
     * @param filterColumnsStream pairs of (row data, routing config)
     * @return f0 = Kafka-bound main stream, f1 = HBase-bound side stream
     */
    private Tuple2<SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>>, DataStream<Tuple2<JSONObject, TableProcess>>> dynamicSplitStream(
            SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> filterColumnsStream) {
        // Tag for the dimension (HBase) side output.
        final OutputTag<Tuple2<JSONObject, TableProcess>> hbaseOutputTag =
                new OutputTag<Tuple2<JSONObject, TableProcess>>("hbase") {
                };

        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> kafkaStream = filterColumnsStream.process(
                new ProcessFunction<Tuple2<JSONObject, TableProcess>, Tuple2<JSONObject, TableProcess>>() {
                    @Override
                    public void processElement(
                            Tuple2<JSONObject, TableProcess> value,
                            ProcessFunction<Tuple2<JSONObject, TableProcess>, Tuple2<JSONObject, TableProcess>>.Context ctx,
                            Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                        // Route on the configured sink type.
                        String sinkType = value.f1.getSink_type();
                        if (Constant.HBASE.equals(sinkType)) {
                            ctx.output(hbaseOutputTag, value);
                        } else if (Constant.SINK_KAFKA.equals(sinkType)) {
                            out.collect(value);
                        }
                    }
                });

        DataStream<Tuple2<JSONObject, TableProcess>> hbaseStream = kafkaStream.getSideOutput(hbaseOutputTag);
        return Tuple2.of(kafkaStream, hbaseStream);
    }

    /**
     * Removes from every row all keys that are not listed in the config's
     * comma-separated sink_columns, mutating the row's JSONObject in place.
     *
     * Fix: the original did {@code List.contains} inside {@code removeIf},
     * making the filter O(keys x columns) per record; a HashSet lookup plus
     * {@code retainAll} is O(keys).
     *
     * @param connectedStream pairs of (row data, routing config)
     * @return the same pairs with each row reduced to the configured columns
     */
    private SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> filterColumns(
            SingleOutputStreamOperator<Tuple2<JSONObject,
                    TableProcess>> connectedStream) {
        return connectedStream.map(new MapFunction<Tuple2<JSONObject, TableProcess>, Tuple2<JSONObject, TableProcess>>() {
            @Override
            public Tuple2<JSONObject, TableProcess> map(Tuple2<JSONObject, TableProcess> value) throws Exception {
                JSONObject data = value.f0;
                // O(1) membership checks instead of List.contains per key.
                Set<String> columns = new HashSet<>(Arrays.asList(value.f1.getSink_columns().split(",")));
                // Keep a key only if it is one of the configured sink columns.
                data.keySet().retainAll(columns);
                return value;
            }
        });
    }

    /**
     * Connects the business-data stream with the broadcast configuration
     * stream and emits (row "data" payload, matching config) pairs. Rows
     * whose "table:type" key has no entry in the broadcast state are dropped.
     *
     * @param dataStream cleaned ods_db records
     * @param tpSteam    configuration rows from the table_process CDC stream
     * @return pairs of the row's "data" object and its routing config
     */
    private SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> connectStreams(
            SingleOutputStreamOperator<JSONObject> dataStream,
            SingleOutputStreamOperator<TableProcess> tpSteam) {
        MapStateDescriptor<String, TableProcess> tpStateDesc = new MapStateDescriptor<>("tpState", String.class,
                TableProcess.class
        );
        // 1. Turn the config stream into a broadcast stream.
        //    State key format: "sourceTable:operateType", e.g. "order_info:insert"
        BroadcastStream<TableProcess> bcStream = tpSteam.broadcast(tpStateDesc);
        // 2. Connect the data stream with the broadcast stream
        return dataStream
                .connect(bcStream)
                // 3. Process both sides of the connected stream
                .process(new BroadcastProcessFunction<JSONObject, TableProcess, Tuple2<JSONObject, TableProcess>>() {
                    // Handles elements of the business-data stream.
                    @Override
                    public void processElement(JSONObject value,
                                               ReadOnlyContext ctx,
                                               Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                        // Look up the routing config for this "table:type" pair.
                        ReadOnlyBroadcastState<String, TableProcess> tpState = ctx.getBroadcastState(tpStateDesc);
                        // Build the lookup key
                        String key = value.getString("table") + ":" + value.getString("type");
                        TableProcess tp = tpState.get(key);
                        // Tables absent from the config table are not meant to be
                        // sunk; their tp is null and the record is silently dropped.
                        if (tp != null) {
                            // The config row already carries the metadata, so only
                            // the "data" payload of the record is forwarded.
                            out.collect(Tuple2.of(value.getJSONObject("data"), tp));
                        }
                    }

                    // Handles elements of the broadcast (config) stream.
                    @Override
                    public void processBroadcastElement(TableProcess value,
                                                        Context ctx,
                                                        Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                        // Store/refresh each config row in the broadcast state.
                        BroadcastState<String, TableProcess> tpState = ctx.getBroadcastState(tpStateDesc);

                        String key = value.getSource_table() + ":" + value.getOperate_type();
                        tpState.put(key, value);
                    }
                });


    }


    /**
     * Reads the dynamic-routing configuration table (table_process) from
     * MySQL as a changelog stream using the flink mysql-cdc connector.
     *
     * NOTE(review): host, port, and credentials are hard-coded below;
     * consider moving them to external configuration.
     *
     * @param env the Flink streaming environment
     * @return a stream of TableProcess rows (only the "add" side of retracts)
     */
    private SingleOutputStreamOperator<TableProcess> readTableProcess(StreamExecutionEnvironment env) {
        // Create the table execution environment
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);
        tEnv.executeSql("CREATE TABLE `table_process` (" +
                "  `source_table` string," +
                "  `operate_type` string," +
                "  `sink_type` string," +
                "  `sink_table` string," +
                "  `sink_columns` string," +
                "  `sink_pk` string," +
                "  `sink_extend` string," +
                "  PRIMARY KEY (`source_table`,`operate_type`)not enforced" +
                ")with(" +
                " 'connector' = 'mysql-cdc'," +
                " 'hostname' = 'hadoop162'," +
                " 'port' = '3306'," +
                " 'username' = 'root'," +
                " 'password' = 'aaaaaa'," +
                " 'database-name' = 'gmall2022_realtime'," +
                " 'table-name' = 'table_process', " +
                // On first start, snapshot the whole table, then follow the binlog.
                " 'debezium.snapshot.mode'='initial'" +
                ")");
        Table tp = tEnv.from("table_process");
        // Retract stream: keep only the "add" messages (f0 == true).
        return tEnv
                .toRetractStream(tp, TableProcess.class)
                .filter(t -> t.f0)
                .map(t -> t.f1);
    }

    /**
     * Basic ETL on raw ods_db records: parse the JSON (stripping maxwell's
     * "bootstrap-" type prefix first), then keep only insert/update rows of
     * the gmall2022 database that carry a non-empty "data" payload.
     *
     * @param stream raw JSON strings from the ods_db topic
     * @return parsed and filtered records
     */
    private SingleOutputStreamOperator<JSONObject> etl(DataStreamSource<String> stream) {
        return stream
                .map(raw -> JSON.parseObject(raw.replaceAll("bootstrap-", "")))
                .filter(obj -> {
                    String type = obj.getString("type");
                    String data = obj.getString("data");
                    return "gmall2022".equals(obj.getString("database"))
                            && obj.getString("table") != null
                            && ("insert".equals(type) || "update".equals(type))
                            && data != null
                            // "{}" has length 2: reject empty data objects
                            && data.length() > 2;
                });
    }
}
