package com.atguigu.gmall.realtime.app.dwd;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.gmall.realtime.app.BaseAppV1;
import com.atguigu.gmall.realtime.bean.TableProcess;
import com.atguigu.gmall.realtime.common.Constant;
import com.atguigu.gmall.realtime.util.FlinkSinkUtil;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.state.BroadcastState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.state.ReadOnlyBroadcastState;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.streaming.api.functions.co.KeyedBroadcastProcessFunction;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.util.Collector;
import org.apache.flink.util.OutputTag;

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

/**
 * @Author lizhenchao@atguigu.cn
 * @Date 2021/8/24 14:54
 */
/**
 * DWD layer dispatcher for business (ods_db) data.
 *
 * <p>Reads maxwell change records from the ODS Kafka topic, joins each record
 * against a broadcast configuration stream (table_process, read via MySQL CDC),
 * trims each record down to the configured sink columns, and routes it either
 * to Kafka (fact tables) or to HBase/Phoenix (dimension tables).
 */
public class DwdDb extends BaseAppV1 {
    public static void main(String[] args) {
        new DwdDb().init(2002, 1, "DwdDb", "DwdDb", Constant.TOPIC_ODS_DB);
    }
    
    @Override
    protected void run(StreamExecutionEnvironment env,
                       DataStreamSource<String> sourceStream) {
        // 1. ETL the raw business records (drop malformed / unusable ones)
        SingleOutputStreamOperator<JSONObject> etledStream = etl(sourceStream);
        // 2. Read the table_process configuration table via MySQL CDC
        SingleOutputStreamOperator<TableProcess> tpStream = readTableProcess(env);
        // 3. Connect the data stream with the broadcast configuration stream
        DataStream<Tuple2<JSONObject, TableProcess>> connectedStream = connectStreams(etledStream, tpStream);
        // 4. Keep only the columns configured in sink_columns for each record
        DataStream<Tuple2<JSONObject, TableProcess>> filteredStream = filterColumns(connectedStream);
        // 5. Dynamic split: one sub-stream bound for Kafka, one for HBase
        Tuple2<DataStream<Tuple2<JSONObject, TableProcess>>, DataStream<Tuple2<JSONObject, TableProcess>>> kafkaHbaseStreams
            = dynamicSplit(filteredStream);
        // 6. Write each sub-stream to its sink
        sink2kafka(kafkaHbaseStreams.f0);
        sink2Hbase(kafkaHbaseStreams.f1);
    }
    
    /**
     * Writes the dimension sub-stream to HBase via Phoenix.
     *
     * <p>Tables are created lazily inside the sink (first record of a table
     * triggers DDL, later records are plain JDBC upserts). Keying by
     * sink_table keeps all rows of one table in the same subtask.
     */
    private void sink2Hbase(DataStream<Tuple2<JSONObject, TableProcess>> stream) {
        stream
            .keyBy(t -> t.f1.getSink_table()) // group by target table name
            .addSink(FlinkSinkUtil.getHbaseSink());
    }
    
    /** Writes the fact sub-stream to Kafka (topic chosen per record by the sink). */
    private void sink2kafka(DataStream<Tuple2<JSONObject, TableProcess>> stream) {
        stream.addSink(FlinkSinkUtil.getKafkaSink());
    }
    
    /**
     * Removes every column of the record that is not listed in the
     * configuration's sink_columns (comma-separated column names).
     */
    private DataStream<Tuple2<JSONObject, TableProcess>> filterColumns(DataStream<Tuple2<JSONObject, TableProcess>> connectedStream) {
        return connectedStream.map(new MapFunction<Tuple2<JSONObject, TableProcess>, Tuple2<JSONObject, TableProcess>>() {
            @Override
            public Tuple2<JSONObject, TableProcess> map(Tuple2<JSONObject, TableProcess> value) throws Exception {
                JSONObject data = value.f0;
                TableProcess tp = value.f1;
                // Set gives O(1) membership tests inside removeIf (List.contains is O(n) per key)
                Set<String> sinkColumns = new HashSet<>(Arrays.asList(tp.getSink_columns().split(",")));
                // JSONObject is backed by a Map: removing a key removes the column
                data.keySet().removeIf(key -> !sinkColumns.contains(key));
                return value;
            }
        });
    }
    
    /**
     * Splits the connected stream by sink_type: records bound for Kafka stay
     * on the main output, records bound for HBase go to a side output.
     *
     * @return (kafkaStream, hbaseStream)
     */
    private Tuple2<DataStream<Tuple2<JSONObject, TableProcess>>, DataStream<Tuple2<JSONObject, TableProcess>>> dynamicSplit(DataStream<Tuple2<JSONObject, TableProcess>> connectedStream) {
        // Anonymous subclass so the generic type survives erasure
        OutputTag<Tuple2<JSONObject, TableProcess>> hbaseTag = new OutputTag<Tuple2<JSONObject, TableProcess>>("hbase") {};
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> kafkaStream = connectedStream
            .process(new ProcessFunction<Tuple2<JSONObject, TableProcess>, Tuple2<JSONObject, TableProcess>>() {
                @Override
                public void processElement(Tuple2<JSONObject, TableProcess> value,
                                           Context ctx,
                                           Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                    TableProcess tp = value.f1;
                    
                    if (Constant.SINK_KAFKA.equals(tp.getSink_type())) {
                        out.collect(value);
                    } else if (Constant.SINK_HBASE.equals(tp.getSink_type())) {
                        ctx.output(hbaseTag, value);
                    }
                    // any other sink_type is silently dropped (no configured target)
                }
            });
        DataStream<Tuple2<JSONObject, TableProcess>> hbaseStream = kafkaStream.getSideOutput(hbaseTag);
        
        return Tuple2.of(kafkaStream, hbaseStream);
    }
    
    /**
     * Connects the data stream with the configuration stream.
     *
     * <p>The configuration stream is broadcast; every data record looks up its
     * {@link TableProcess} in the broadcast state under the key
     * {@code "<table>:<type>"} (e.g. "user_info:insert"). Records for tables
     * that have no configuration entry are dropped.
     */
    private DataStream<Tuple2<JSONObject, TableProcess>> connectStreams(SingleOutputStreamOperator<JSONObject> dataStream,
                                                                        SingleOutputStreamOperator<TableProcess> tpStream) {
        MapStateDescriptor<String, TableProcess> tpStateDesc =
            new MapStateDescriptor<>("tpState", String.class, TableProcess.class);
        
        // 1. Turn the configuration stream into a broadcast stream
        //    state key: "user_info:insert"
        BroadcastStream<TableProcess> tpBCStream = tpStream
            .broadcast(tpStateDesc);
        
        // 2. Connect the data stream with the broadcast stream
        // 3. For every data record, look up its TableProcess in broadcast state
        return dataStream
            .keyBy(json -> json.getString("table"))
            .connect(tpBCStream)
            .process(new KeyedBroadcastProcessFunction<String, JSONObject, TableProcess, Tuple2<JSONObject, TableProcess>>() {
                @Override
                public void processElement(JSONObject input,
                                           ReadOnlyContext ctx,
                                           Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                    ReadOnlyBroadcastState<String, TableProcess> tpState = ctx.getBroadcastState(tpStateDesc);
                    // maxwell bootstrap emits type='bootstrap-insert'; normalize it
                    // to 'insert' so it matches the configuration key.
                    // replace (literal) instead of replaceAll (regex) — no pattern needed.
                    String key = input.getString("table") + ":" + input.getString("type").replace("bootstrap-", "");
                    
                    TableProcess tp = tpState.get(key);
                    // Tables without a configuration row are not analyzed -> tp is null
                    if (tp != null) {
                        // Downstream only needs the 'data' payload, not the envelope
                        out.collect(Tuple2.of(input.getJSONObject("data"), tp));
                    }
                }
                
                @Override
                public void processBroadcastElement(TableProcess tp,
                                                    Context ctx,
                                                    Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                    BroadcastState<String, TableProcess> tpState = ctx.getBroadcastState(tpStateDesc);
                    // Store every configuration row under "source_table:operate_type"
                    String key = tp.getSource_table() + ":" + tp.getOperate_type();
                    tpState.put(key, tp);
                }
            });
    }
    
    /**
     * Reads the table_process configuration table via MySQL CDC.
     *
     * <p>snapshot.mode=initial: on startup the full table is read with a
     * snapshot query, after which changes are tracked through the binlog.
     * The retract stream is filtered to keep only add (f0 == true) messages.
     */
    private SingleOutputStreamOperator<TableProcess> readTableProcess(StreamExecutionEnvironment env) {
        StreamTableEnvironment tenv = StreamTableEnvironment.create(env);
        
        tenv.executeSql("create table table_process(" +  // in-memory catalog table
                            "   source_table string, " +
                            "   operate_type string, " +
                            "   sink_type string, " +
                            "   sink_table string, " +
                            "   sink_columns string, " +
                            "   sink_pk string, " +
                            "   sink_extend string, " +
                            "   primary key(source_table, operate_type) not enforced " +
                            ")with(" +
                            "   'connector' = 'mysql-cdc'," +
                            "   'hostname' = 'hadoop162'," +
                            "   'port' = '3306',\n" +
                            "   'username' = 'root',\n" +
                            "   'password' = 'aaaaaa',\n" +
                            "   'database-name' = 'gmall2021_realtime',\n" +
                            "   'table-name' = 'table_process', " +  // physical MySQL table
                            "   'debezium.snapshot.mode' = 'initial'" +
                            ")");
        
        Table table = tenv.from("table_process");
        
        return tenv
            .toRetractStream(table, TableProcess.class)
            .filter(t -> t.f0)   // keep only insert/add messages
            .map(t -> t.f1);
    }
    
    /**
     * Filters out unusable records: missing database/table/type/data fields,
     * non insert/update operations, or an empty data payload.
     */
    private SingleOutputStreamOperator<JSONObject> etl(DataStreamSource<String> sourceStream) {
        return sourceStream
            .map(JSON::parseObject)
            .filter(obj -> {
                // Read 'type' once and null-check it BEFORE calling contains/equals:
                // the original dereferenced it unconditionally and could NPE.
                String type = obj.getString("type");
                String data = obj.getString("data");
                return obj.getString("database") != null
                    && obj.getString("table") != null
                    && type != null
                    // maxwell bootstrap uses type='bootstrap-insert', hence contains
                    && (type.contains("insert") || "update".equals(type))
                    && data != null
                    && data.length() > 2;  // "{}" is length 2 -> empty payload
            });
    }
}
