package com.atguigu.realtime.app.dwd;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.realtime.app.BaseAppV1;
import com.atguigu.realtime.bean.TableProcess;
import com.atguigu.realtime.common.Constant;
import com.atguigu.realtime.util.FlinkSinkUtil;
import org.apache.flink.api.common.state.BroadcastState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.state.ReadOnlyBroadcastState;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.util.Collector;
import org.apache.flink.util.OutputTag;

import java.util.Arrays;
import java.util.List;

/**
 * @author chenlongStart
 * @create 2021-06-25 18:01
 * @desc
 */
/**
 * DWD-layer dynamic-split job.
 *
 * <p>Consumes business-table change records from the ods_db Kafka topic and routes
 * each record according to a configuration table (MySQL {@code table_process}, read
 * via CDC and broadcast to every task): fact-table rows go to Kafka topics,
 * dimension-table rows go to HBase. Each record is also projected down to the
 * columns configured for its sink.
 */
public class DwdDbApp extends BaseAppV1 {

    public static void main(String[] args) {
        // port=2002, parallelism=1, ckAndGroupId="DwdDbApp", topic=ods_db, jobName="DwdDbApp"
        new DwdDbApp().init(2002, 1, "DwdDbApp", Constant.TOPIC_ODS_DB, "DwdDbApp");
    }

    @Override
    public void run(StreamExecutionEnvironment env, DataStreamSource<String> sourceStream) {
        // 1. Read the configuration table as a stream using MySQL CDC
        SingleOutputStreamOperator<TableProcess> tpStream = readProcessTable(env);

        // 2. ETL the raw ods_db records (drop malformed / irrelevant rows)
        SingleOutputStreamOperator<JSONObject> etlStream = etl(sourceStream);

        // 3. Connect the data stream with the broadcast configuration stream
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> dataTpStream = connectStream(etlStream, tpStream);

        // 4. Dynamic split: main output -> Kafka (facts), side output -> HBase (dimensions)
        Tuple2<SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>>, DataStream<Tuple2<JSONObject, TableProcess>>> kafkaAndHbaseStreams = dynamic(dataTpStream);

        // 5. Fact-table data to Kafka
        sendToKafka(kafkaAndHbaseStreams.f0);

        // 6. Dimension-table data to HBase
        sendToHbase(kafkaAndHbaseStreams.f1);
    }

    /**
     * Writes dimension rows to HBase. Keyed by sink table name so all rows of the
     * same table are handled by the same subtask.
     */
    private void sendToHbase(DataStream<Tuple2<JSONObject, TableProcess>> hbaseStream) {
        hbaseStream
                .keyBy(s -> s.f1.getSinkTable())
                .addSink(FlinkSinkUtil.getHBaseSink());
    }

    /**
     * Writes fact rows to Kafka; the sink resolves the target topic from the
     * accompanying {@link TableProcess} configuration.
     */
    private void sendToKafka(SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> stream) {
        stream.addSink(FlinkSinkUtil.getKafkaSink());
    }

    /**
     * Splits the (data, config) stream by the configured sink type and projects each
     * data record to the configured sink columns.
     *
     * @param dataTpStream stream of (change record, matching configuration) pairs
     * @return tuple of (Kafka-bound main stream, HBase-bound side-output stream)
     */
    private Tuple2<SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>>, DataStream<Tuple2<JSONObject, TableProcess>>> dynamic(SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> dataTpStream) {

        // Anonymous subclass so the generic type survives erasure
        OutputTag<Tuple2<JSONObject, TableProcess>> hbaseTag = new OutputTag<Tuple2<JSONObject, TableProcess>>("hbase") {};

        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> kafkaStream = dataTpStream
                .process(new ProcessFunction<Tuple2<JSONObject, TableProcess>, Tuple2<JSONObject, TableProcess>>() {
                    @Override
                    public void processElement(Tuple2<JSONObject, TableProcess> value,
                                               Context ctx,
                                               Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                        TableProcess tp = value.f1;

                        // Keep only the "data" payload, trimmed to the configured columns
                        JSONObject data = value.f0.getJSONObject("data");
                        filterColumns(data, tp);
                        Tuple2<JSONObject, TableProcess> result = Tuple2.of(data, tp);

                        if (TableProcess.SINK_TYPE_KAFKA.equals(tp.getSinkType())) {
                            out.collect(result);
                        } else if (TableProcess.SINK_TYPE_HBASE.equals(tp.getSinkType())) {
                            ctx.output(hbaseTag, result);
                        }
                        // Records with any other sink type are dropped
                    }

                    // Removes every key that is not listed in sink_columns (comma separated)
                    private void filterColumns(JSONObject data, TableProcess tp) {
                        List<String> columns = Arrays.asList(tp.getSinkColumns().split(","));
                        data.keySet().removeIf(key -> !columns.contains(key));
                    }
                });
        DataStream<Tuple2<JSONObject, TableProcess>> hbaseStream = kafkaStream.getSideOutput(hbaseTag);
        return Tuple2.of(kafkaStream, hbaseStream);
    }

    /**
     * Broadcasts the configuration stream and joins it with the data stream.
     *
     * <p>The broadcast state maps {@code sourceTable + ":" + operateType} to its
     * {@link TableProcess} configuration; each data record looks up its config by
     * that key and is emitted paired with it.
     *
     * @param etlStream cleaned change records
     * @param tpStream  configuration records from the CDC source
     * @return stream of (record, configuration) pairs; records with no matching
     *         configuration are discarded
     */
    private SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> connectStream(SingleOutputStreamOperator<JSONObject> etlStream, SingleOutputStreamOperator<TableProcess> tpStream) {
        MapStateDescriptor<String, TableProcess> tpStateDesc = new MapStateDescriptor<>("tpState", String.class, TableProcess.class);

        /*
         1. Turn the configuration stream into a broadcast stream.

         When e.g. a user record arrives we need to know where it should go.
         The sink is determined by the combination: table name + operation type.
         That combination (the key) maps to exactly one TableProcess object (the value)
         in the broadcast state.
         */
        BroadcastStream<TableProcess> tpBCStream = tpStream.broadcast(tpStateDesc);

        // 2. Connect the data stream with the broadcast stream
        return etlStream
                .connect(tpBCStream)
                .process(new BroadcastProcessFunction<JSONObject, TableProcess, Tuple2<JSONObject, TableProcess>>() {
                    @Override
                    public void processElement(JSONObject value,
                                               ReadOnlyContext ctx,
                                               Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                        // Data-stream side: look up the configuration for this record
                        ReadOnlyBroadcastState<String, TableProcess> tpState = ctx.getBroadcastState(tpStateDesc);

                        // Historical dimensions captured via Maxwell bootstrap arrive with
                        // type "bootstrap-insert"; normalize it to "insert" before the lookup.
                        // (String.replace, not replaceAll: literal replacement, no regex needed.)
                        String key = value.getString("table") + ":" + value.getString("type").replace("bootstrap-", "");
                        TableProcess tp = tpState.get(key);
                        if (tp != null) {
                            out.collect(Tuple2.of(value, tp));
                        } else {
                            // No configuration for this record: either it is genuinely
                            // unconfigured, or the config has not arrived yet. A possible
                            // improvement is to buffer such records and retry later.
                        }
                    }

                    @Override
                    public void processBroadcastElement(TableProcess value,
                                                        Context ctx,
                                                        Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                        // Broadcast side: store/refresh the configuration

                        // 1. Get the broadcast state
                        BroadcastState<String, TableProcess> tpState = ctx.getBroadcastState(tpStateDesc);
                        // 2. Put the configuration into the state, keyed by table:operation
                        String key = value.getSourceTable() + ":" + value.getOperateType();
                        tpState.put(key, value);
                    }
                });
    }

    /**
     * Parses and filters the raw ods_db JSON stream.
     *
     * <p>Keeps only records that have a database, table and type, whose type is an
     * insert (including "bootstrap-insert") or "update", and whose "data" payload is
     * a non-empty object (serialized length &gt; 2, i.e. more than "{}").
     */
    private SingleOutputStreamOperator<JSONObject> etl(DataStreamSource<String> sourceStream) {
        return sourceStream
                .map(JSON::parseObject)
                .filter(obj ->
                        obj.getString("database") != null
                                && obj.getString("table") != null
                                && obj.getString("type") != null
                                // contains("insert") also matches "bootstrap-insert";
                                // null-safe constant-first equals for "update"
                                && (obj.getString("type").contains("insert") || "update".equals(obj.getString("type")))
                                && obj.getString("data") != null
                                && obj.getString("data").length() > 2
                );
    }

    /**
     * Reads the MySQL configuration table {@code table_process} as a changelog
     * stream via the mysql-cdc connector and maps it to {@link TableProcess} POJOs.
     *
     * <p>snapshot.mode=initial: take a full snapshot first, then stream changes.
     * Retract (delete) messages are filtered out, keeping only upserts.
     */
    private SingleOutputStreamOperator<TableProcess> readProcessTable(StreamExecutionEnvironment env) {
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);
        tEnv
                .executeSql("create table process_table(" +
                        "  `source_table` string," +
                        "  `operate_type` string ," +
                        "  `sink_type` string ," +
                        "  `sink_table` string ," +
                        "  `sink_columns` string ," +
                        "  `sink_pk` string ," +
                        "  `sink_extend` string ," +
                        "   primary key(source_table,operate_type) not enforced" +
                        ")with( " +
                        " 'connector' = 'mysql-cdc'," +
                        " 'hostname' = 'hadoop162'," +
                        " 'port' = '3306'," +
                        " 'username' = 'root'," +
                        " 'password' = 'aaaaaa'," +
                        " 'database-name' = 'gmall2021_realtime'," +
                        " 'table-name' = 'table_process'," +
                        " 'debezium.snapshot.mode' = 'initial' " + // snapshot first, then incremental changes
                        ")");

        // Alias columns to the TableProcess POJO property names
        Table table = tEnv
                .sqlQuery("select" +
                        "  source_table sourceTable, " +
                        "  sink_type sinkType, " +
                        "  operate_type operateType, " +
                        "  sink_table sinkTable, " +
                        "  sink_columns sinkColumns, " +
                        "  sink_pk sinkPk, " +
                        "  sink_extend sinkExtend " +
                        "from process_table");

        return tEnv
                .toRetractStream(table, TableProcess.class)
                .filter(tuple2 -> tuple2.f0)   // keep only add messages, drop retractions
                .map(tuple2 -> tuple2.f1);
    }
}
