package com.fulu.realtime.app;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.fulu.realtime.bean.TableProcess;
import com.fulu.realtime.common.Constant;
import com.fulu.realtime.utils.FlinkSinkUtil;

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.state.BroadcastState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.state.ReadOnlyBroadcastState;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.util.Collector;
import org.apache.flink.util.OutputTag;

/**
 * DWD-layer job for business-table changelog data.
 *
 * <p>Pipeline: ETL the raw CDC records, broadcast the routing configuration
 * read from the MySQL {@code table_process} table, join data with config,
 * strip unwanted columns, then dynamically split the stream and write each
 * record to Kafka or HBase (via Phoenix) according to its configured sink.
 */
public class DwdDbApp extends BaseAppV1 {
    
    public static void main(String[] args) {
        // port 2002, parallelism 1, checkpoint/job name + consumer group "DwdDbApp"
        new DwdDbApp().init(2002, 1, "DwdDbApp", "DwdDbApp", Constant.LIVELY_TIME_TOPIC);
    }
    
    @Override
    public void run(StreamExecutionEnvironment env,
                    DataStreamSource<String> stream) {
        // 1. ETL the raw business data (data stream)
        SingleOutputStreamOperator<JSONObject> eltedStream = etl(stream);
        
        // 2. Read the routing configuration table (config stream)
        SingleOutputStreamOperator<TableProcess> tpStream = readTableProcess(env);
        
        // 3. Broadcast the config stream and connect it with the data stream
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> connectedStream = connectStreams(eltedStream, tpStream);
        // 4. Drop the columns the sink does not want
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> filteredStream = filterColumns(connectedStream);
        // 5. Dynamically split the stream per config (Kafka-bound vs HBase-bound)
        Tuple2<SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>>, DataStream<Tuple2<JSONObject, TableProcess>>> kafkaHbaseStreams = dynamicSplit(filteredStream);
        // 6. Write each sub-stream to its own sink
        writeToKafka(kafkaHbaseStreams.f0);
        writeToHbase(kafkaHbaseStreams.f1);
    }
    
    /**
     * Writes the HBase-bound records through a custom Phoenix sink.
     *
     * <p>A plain JDBC sink cannot be used here because it executes a single
     * SQL statement, while this sink must both create the table and insert
     * the data — hence the custom sink built on Phoenix.
     */
    private void writeToHbase(DataStream<Tuple2<JSONObject, TableProcess>> stream) {
        stream
            // group records of the same sink table together
            .keyBy(t -> t.f1.getSink_table())
            .addSink(FlinkSinkUtil.getPhoenixSink());
        
    }
    
    /**
     * Writes the Kafka-bound records. Keying by sink table keeps records
     * destined for the same topic in the same group, which improves
     * producer efficiency.
     */
    private void writeToKafka(SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> stream) {
        stream
            .keyBy(t -> t.f1.getSink_table())
            .addSink(FlinkSinkUtil.getKafkaSink());
        
        
    }
    
    /**
     * Splits the stream by configured sink type: the main output goes to
     * Kafka, a side output (tag "hbase") goes to HBase. Records whose
     * sink_type matches neither constant are dropped silently.
     *
     * @return tuple of (kafka-bound main stream, hbase-bound side stream)
     */
    private Tuple2<SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>>, DataStream<Tuple2<JSONObject, TableProcess>>> dynamicSplit(SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> stream) {
        
        OutputTag<Tuple2<JSONObject, TableProcess>> hbaseTag = new OutputTag<Tuple2<JSONObject, TableProcess>>("hbase") {};
        // main stream -> kafka, side output -> hbase
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> kafkaStream = stream.process(new ProcessFunction<Tuple2<JSONObject, TableProcess>, Tuple2<JSONObject, TableProcess>>() {
            @Override
            public void processElement(Tuple2<JSONObject, TableProcess> value,
                                       Context ctx,
                                       Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                String sink_type = value.f1.getSink_type();
                // NOTE(review): CLICK_HOUSE_DRIVER / KAFKA_BROKERS look like the
                // wrong constants for a kafka-vs-hbase routing decision — verify
                // their values actually match the sink_type strings stored in the
                // table_process config (likely "kafka" / "hbase").
                if (Constant.CLICK_HOUSE_DRIVER.equals(sink_type)) {
                    out.collect(value);
                } else if (Constant.KAFKA_BROKERS.equals(sink_type)) {
                    ctx.output(hbaseTag, value);
                }
            }
        });
        DataStream<Tuple2<JSONObject, TableProcess>> hbaseStream = kafkaStream.getSideOutput(hbaseTag);
        return Tuple2.of(kafkaStream, hbaseStream);
        
    }
    
    /**
     * Removes from each record's data object every key that is not listed in
     * the config's sink_columns (a comma-separated column list).
     */
    private SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> filterColumns(SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> stream) {
        return stream.map(new MapFunction<Tuple2<JSONObject, TableProcess>, Tuple2<JSONObject, TableProcess>>() {
            @Override
            public Tuple2<JSONObject, TableProcess> map(Tuple2<JSONObject, TableProcess> value) throws Exception {
                JSONObject data = value.f0;
                TableProcess tp = value.f1;
                
                // BUGFIX: the old check sink_columns.contains(key) was a substring
                // match, so a key like "id" was wrongly kept whenever any column
                // such as "user_id" was configured. Split the comma-separated list
                // into exact column names and test set membership instead.
                Set<String> columns = new HashSet<>(Arrays.asList(tp.getSink_columns().split(",")));
                data.keySet().removeIf(key -> !columns.contains(key));
                
                return value;
            }
        });
        
    }
    
    /**
     * Broadcasts the config stream and connects it with the data stream.
     * For each data record the matching {@link TableProcess} is looked up in
     * broadcast state under the key "table:type" (e.g. "user_info:insert");
     * records without a config entry are dropped (those tables need no sink).
     *
     * @return stream of (record "data" payload, its routing config)
     */
    private SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> connectStreams(SingleOutputStreamOperator<JSONObject> dataStream,
                                                                                        SingleOutputStreamOperator<TableProcess> tpStream) {
        MapStateDescriptor<String, TableProcess> tpStateDesc = new MapStateDescriptor<>("tpState", String.class, TableProcess.class);
        // turn the config stream into a broadcast stream, then connect
        BroadcastStream<TableProcess> tpBCStream = tpStream.broadcast(tpStateDesc);
        
        return dataStream
            .connect(tpBCStream)
            .process(new BroadcastProcessFunction<JSONObject, TableProcess, Tuple2<JSONObject, TableProcess>>() {
                @Override
                public void processElement(JSONObject value,
                                           ReadOnlyContext ctx,
                                           Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                    // look up this record's routing config in broadcast state
                    ReadOnlyBroadcastState<String, TableProcess> tpState = ctx.getBroadcastState(tpStateDesc);
                    String key = value.getString("table") + ":" + value.getString("type");
                    TableProcess tp = tpState.get(key);
                    // null means the table has no sink configured — skip it
                    if (tp != null) {
                        // forward only the "data" payload downstream
                        out.collect(Tuple2.of(value.getJSONObject("data"), tp));
                    }
                }
                
                @Override
                public void processBroadcastElement(TableProcess tp,
                                                    Context ctx,
                                                    Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                    // store every config record in broadcast state, keyed by
                    // source_table:operate_type
                    BroadcastState<String, TableProcess> tpState = ctx.getBroadcastState(tpStateDesc);
                    String key = tp.getSource_table() + ":" + tp.getOperate_type();
                    tpState.put(key, tp);
                }
            });
        
    }
    
    /**
     * Reads the routing configuration from MySQL via the mysql-cdc connector.
     * snapshot.mode=initial reads the full table at startup, then follows the
     * binlog for changes. Retract messages (f0 == false) are filtered out so
     * only the current config rows flow downstream.
     */
    private SingleOutputStreamOperator<TableProcess> readTableProcess(StreamExecutionEnvironment env) {
        StreamTableEnvironment tenv = StreamTableEnvironment.create(env);
        
        tenv.executeSql("CREATE TABLE tp (" +
                            " source_table string, " +
                            " operate_type string, " +
                            " sink_type string, " +
                            " sink_table string, " +
                            " sink_columns string, " +
                            " sink_pk string, " +
                            " sink_extend string, " +
                            " primary key(source_table, operate_type) not enforced " +
                            ") WITH (" +
                            " 'connector' = 'mysql-cdc'," +
                            " 'hostname' = 'hadoop162'," +
                            " 'port' = '3306'," +
                            " 'username' = 'root'," +
                            " 'password' = 'aaaaaa'," +
                            " 'database-name' = 'gmall2021_realtime'," +
                            " 'table-name' = 'table_process', " +
                            " 'debezium.snapshot.mode' = 'initial' " +
                            ")");
        
        Table table = tenv.sqlQuery("select * from tp");
        
        return tenv
            .toRetractStream(table, TableProcess.class)
            .filter(t -> t.f0)
            .map(t -> t.f1);
        
    }
    
    /**
     * ETL for the raw CDC JSON: strips the "bootstrap-" prefix from the type
     * field, parses the JSON, and keeps only insert/update records that carry
     * a non-trivial data payload (length > 10 discards empty objects).
     */
    private SingleOutputStreamOperator<JSONObject> etl(DataStreamSource<String> stream) {
        return stream
            // literal replacement — use replace, not regex-based replaceAll
            .map(line -> JSON.parseObject(line.replace("bootstrap-", "")))
            .filter(obj ->
                        obj.containsKey("database")
                            && obj.containsKey("table")
                            && obj.containsKey("type")
                            && ("insert".equals(obj.getString("type")) || "update".equals(obj.getString("type")))
                            && obj.containsKey("data")
                            && obj.getString("data").length() > 10);
    }
}
