package com.atguigu.gmall.app.dwd;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.gmall.app.BaseAppV1;
import com.atguigu.gmall.bean.TableProcess;
import com.atguigu.gmall.common.Constant;
import com.atguigu.gmall.util.FlinkSinkUtil;
import org.apache.flink.api.common.state.BroadcastState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.state.ReadOnlyBroadcastState;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.streaming.api.functions.co.KeyedBroadcastProcessFunction;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.util.Collector;
import org.apache.flink.util.OutputTag;

import java.util.Arrays;
import java.util.List;

/**
 * @Author lizhenchao@atguigu.cn
 * @Date 2021/7/28 14:05
 *
 * Reads business change-log data from the ods_db topic (produced by maxwell).
 * The configuration table does NOT go through maxwell; it is read directly
 * from MySQL via the flink-sql CDC connector.
 */
public class DwdDbApp extends BaseAppV1 {

    public static void main(String[] args) {
        // init(port, parallelism, checkpoint-group/app id, consumer group, source topic)
        new DwdDbApp().init(
            2002,
            1,
            "DwdDbApp",
            "DwdDbApp",
            Constant.TOPIC_ODS_DB
        );
    }

    /**
     * Job topology:
     * ETL the raw maxwell JSON, broadcast the CDC config stream onto it,
     * then dynamically split each record to Kafka (fact tables) or
     * HBase/Phoenix (dimension tables) according to its matching config row.
     */
    @Override
    protected void run(StreamExecutionEnvironment env,
                       DataStreamSource<String> dataStream) {
        // 1. ETL: parse and keep only well-formed insert/update change records
        SingleOutputStreamOperator<JSONObject> etledStream = etlDataStream(dataStream);
        // 2. Read the config table as a change stream (flink-sql CDC)
        SingleOutputStreamOperator<TableProcess> tableProcessStream = readTableProcess(env);
        // 3. Connect the data stream with the broadcast config stream
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> connectedStreams = connectStreams(etledStream, tableProcessStream);
        // 4. Route each record to the Kafka main stream or the HBase side output
        Tuple2<SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>>, DataStream<Tuple2<JSONObject, TableProcess>>> kafkaHbaseStreams = dynamicSplit(connectedStreams);
        // kafkaHbaseStreams.f0.print("kafka");
        // 4.1 Fact tables -> Kafka
        sendToKafka(kafkaHbaseStreams.f0);
        // 4.2 Dimension tables -> HBase (Phoenix)
        sendToHbase(kafkaHbaseStreams.f1);

    }

    /**
     * Sinks dimension-table records to HBase via Phoenix.
     * NOTE(review): Phoenix does not auto-create tables, so the sink is
     * expected to create the target table dynamically before writing —
     * confirm FlinkSinkUtil.getHbaseSink() handles that.
     * Keyed by sink_table so all rows of one table land in the same subtask.
     */
    private void sendToHbase(DataStream<Tuple2<JSONObject, TableProcess>> stream) {
        stream
            .keyBy(t -> t.f1.getSink_table())
            .addSink(FlinkSinkUtil.getHbaseSink());

    }

    /** Sinks fact-table records to Kafka; the topic comes from each record's TableProcess. */
    private void sendToKafka(SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> stream) {
        stream.addSink(FlinkSinkUtil.getKafkaSink());
    }

    /**
     * Splits the connected stream by each record's configured sink_type:
     * kafka records stay in the main stream, hbase records go to a side output.
     * Before routing, each record's "data" payload is projected down to the
     * columns listed in sink_columns.
     *
     * @return Tuple2 of (kafka main stream, hbase side-output stream)
     */
    private Tuple2<SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>>, DataStream<Tuple2<JSONObject, TableProcess>>> dynamicSplit(SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> connectedStreams) {
        // Anonymous subclass so the generic type survives erasure
        OutputTag<Tuple2<JSONObject, TableProcess>> hbaseTag = new OutputTag<Tuple2<JSONObject, TableProcess>>("hbase") {};
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> kafkaStream = connectedStreams.process(new ProcessFunction<Tuple2<JSONObject, TableProcess>, Tuple2<JSONObject, TableProcess>>() {
            @Override
            public void processElement(Tuple2<JSONObject, TableProcess> input,
                                       Context ctx,
                                       Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                // Keep only the "data" payload of the maxwell envelope
                Tuple2<JSONObject, TableProcess> data = Tuple2.of(input.f0.getJSONObject("data"), input.f1);
                // Project the payload down to the columns configured in sink_columns
                filterColumns(data);

                String sinkType = input.f1.getSink_type();
                if (TableProcess.SINK_TYPE_KAFKA.equals(sinkType)) {
                    // Kafka-bound records stay in the main stream
                    out.collect(data);

                } else if (TableProcess.SINK_TYPE_HBASE.equals(sinkType)) {
                    // HBase-bound records go to the side output
                    ctx.output(hbaseTag, data);
                }
                // Any other sink_type is silently dropped (by design)
            }


            /**
             * Removes from the JSON payload every key not listed in sink_columns.
             */
            private void filterColumns(Tuple2<JSONObject, TableProcess> data) {
                JSONObject jsonData = data.f0;  // column -> value
                // Arrays.asList returns a fixed-size view: read-only use here
                List<String> columns = Arrays.asList(data.f1.getSink_columns().split(","));
                // removeIf on keySet() mutates the backing JSONObject
                jsonData.keySet().removeIf(key -> !columns.contains(key));
            }
        });
        DataStream<Tuple2<JSONObject, TableProcess>> hbaseStream = kafkaStream.getSideOutput(hbaseTag);
        return Tuple2.of(kafkaStream, hbaseStream);
    }

    /**
     * Connects the ETLed data stream with the broadcast config stream so every
     * data record can look up its TableProcess config.
     *
     * Broadcast state layout:
     *   key   = source_table + ":" + operate_type
     *   value = TableProcess
     *
     * Records with no matching config entry are dropped.
     * NOTE(review): deletes on the config table are never removed from the
     * broadcast state (processBroadcastElement only puts) — confirm intended.
     */
    private SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> connectStreams(SingleOutputStreamOperator<JSONObject> etledStream,
                                                                                        SingleOutputStreamOperator<TableProcess> tableProcessStream) {
        MapStateDescriptor<String, TableProcess> tpStateDesc = new MapStateDescriptor<>("tpState", String.class, TableProcess.class);
        // 1. Turn the config stream into a broadcast stream
        BroadcastStream<TableProcess> tpBroadcastStream = tableProcessStream.broadcast(tpStateDesc);
        // 2. Connect the (keyed) data stream with the broadcast stream
        return etledStream
            .keyBy(obj -> obj.getString("table"))
            .connect(tpBroadcastStream)
            .process(new KeyedBroadcastProcessFunction<String, JSONObject, TableProcess, Tuple2<JSONObject, TableProcess>>() {
                @Override
                public void processElement(JSONObject input,
                                           ReadOnlyContext ctx,
                                           Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                    ReadOnlyBroadcastState<String, TableProcess> tpState = ctx.getBroadcastState(tpStateDesc);

                    // maxwell bootstrap emits "bootstrap-insert" etc.; normalize to the
                    // plain operate_type used as the config key.
                    // replace (not replaceAll): the pattern is a literal, no regex needed
                    String key = input.getString("table") + ":" + input.getString("type").replace("bootstrap-", "");

                    TableProcess tp = tpState.get(key);
                    // A null config means this record needs no downstream processing
                    if (tp != null) {
                        out.collect(Tuple2.of(input, tp));  // could forward only the "data" payload instead
                    }
                }

                @Override
                public void processBroadcastElement(TableProcess tp,
                                                    Context ctx,
                                                    Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                    // Write every incoming config row into the broadcast state
                    BroadcastState<String, TableProcess> tpState = ctx.getBroadcastState(tpStateDesc);
                    String key = tp.getSource_table() + ":" + tp.getOperate_type();
                    tpState.put(key, tp);
                }
            });
    }

    /**
     * Reads the table_process config table from MySQL via the flink-sql CDC
     * connector: a full snapshot on startup, then binlog-driven updates.
     * Retractions (t.f0 == false) are filtered out, so only upserts flow on.
     */
    private SingleOutputStreamOperator<TableProcess> readTableProcess(StreamExecutionEnvironment env) {
        StreamTableEnvironment tenv = StreamTableEnvironment.create(env);

        tenv.executeSql("CREATE TABLE `table_process` (\n" +
                            "  `source_table` string,\n" +
                            "  `operate_type` string ,\n" +
                            "  `sink_type` string ,\n" +
                            "  `sink_table` string ,\n" +
                            "  `sink_columns`  string ,\n" +
                            "  `sink_pk`  string,\n" +
                            "  `sink_extend` string ,\n" +
                            "  PRIMARY KEY (`source_table`,`operate_type`)  NOT ENFORCED\n" +
                            ")with(" +
                            "   'connector' = 'mysql-cdc', " +
                            "   'hostname' = 'hadoop162', " +
                            "   'port' = '3306', " +
                            "   'username' = 'root', " +
                            "   'password' = 'aaaaaa', " +
                            "   'database-name' = 'gmall2021_realtime', " +
                            "   'table-name' = 'table_process', " +
                            "   'debezium.snapshot.mode' = 'initial' " +
                            ")");
        // snapshot.mode 'initial': read the whole table on startup, then switch
        //   to binlog monitoring for subsequent changes.
        // 'never' would skip the snapshot and rely on the binlog alone.

        Table table = tenv.from("table_process");

        return tenv
            .toRetractStream(table, TableProcess.class)
            .filter(t -> t.f0)   // keep only add messages, drop retractions
            .map(t -> t.f1);

    }

    /**
     * Parses the raw maxwell JSON strings and keeps only well-formed change
     * records: database/table/type present, type is insert (incl. bootstrap-insert)
     * or update, and the data payload is non-trivial (length > 10 filters out
     * empty objects like "{}" and bootstrap start/complete markers).
     */
    private SingleOutputStreamOperator<JSONObject> etlDataStream(DataStreamSource<String> dataStream) {

        return dataStream
            .map(JSON::parseObject)
            .filter(obj ->
                        obj.getString("database") != null
                            && obj.getString("table") != null
                            && obj.getString("type") != null
                            // contains("insert") deliberately also matches "bootstrap-insert"
                            && (obj.getString("type").contains("insert") || "update".equals(obj.getString("type")))
                            && obj.getString("data") != null
                            && obj.getString("data").length() > 10
            );

    }

}
