package com.atguigu.gmall.realtime.app.dwd;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.gmall.realtime.app.BaseAppV1;
import com.atguigu.gmall.realtime.bean.TableProcess;
import com.atguigu.gmall.realtime.common.Constant;
import com.atguigu.gmall.realtime.util.FlinkSinkUtil;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.state.BroadcastState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.state.ReadOnlyBroadcastState;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.KeyedProcessFunction;
import org.apache.flink.streaming.api.functions.co.KeyedBroadcastProcessFunction;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.util.Collector;
import org.apache.flink.util.OutputTag;

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

/**
 * DWD dynamic-routing job: consumes the ODS db change-log topic, joins each
 * record with its routing configuration (read from MySQL via CDC and held in
 * broadcast state), strips un-configured columns, and routes dimension data to
 * HBase (via Phoenix) and fact data to Kafka.
 *
 * @Author lizhenchao@atguigu.cn
 * @Date 2021/9/28 14:06
 */
public class DwdDbApp extends BaseAppV1 {
    public static void main(String[] args) {
        // port 2002, parallelism 1, ckAndGroupId/jobName "DwdDbApp", source topic ods_db
        new DwdDbApp().init(2002, 1, "DwdDbApp", "DwdDbApp", Constant.TOPIC_ODS_DB);
    }
    
    /**
     * Job pipeline: ETL the raw change log, broadcast the routing config,
     * join the two, trim columns, split by sink type and write each branch out.
     *
     * @param env    the Flink streaming environment (provided by {@link BaseAppV1})
     * @param stream raw JSON change-log records from the ods_db topic
     */
    @Override
    public void run(StreamExecutionEnvironment env,
                    DataStreamSource<String> stream) {
        // 1. ETL: parse and keep only well-formed insert/update records
        SingleOutputStreamOperator<JSONObject> etledStream = etl(stream);
        // 2. Read the routing configuration table (MySQL CDC)
        SingleOutputStreamOperator<TableProcess> tpStream = readProcessTable(env);
        // 3. Connect the data stream with the broadcast config stream
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> connectedStream = connectStream(etledStream, tpStream);
        // 4. Remove the columns that should not be written to the sink
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> filteredStream = filterColumns(connectedStream);
        // 5. Dynamically split the stream by configured sink type
        Tuple2<SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>>, DataStream<Tuple2<JSONObject, TableProcess>>> kafkaHbaseStreams = dynamicSplit(filteredStream);
        // 6. Each branch goes to its own sink
        writeToKafka(kafkaHbaseStreams.f0);
        writeToHbase(kafkaHbaseStreams.f1);
        
    }
    
    /**
     * Writes dimension data to HBase through Phoenix.
     *
     * <p>The sink has to do two things: 1. create the table on demand (when the
     * first record for a dimension arrives) and 2. write the row via JDBC.
     * Flink's built-in JDBC sink only supports a single SQL statement, so a
     * custom sink ({@code FlinkSinkUtil.getPhoenixSink()}) is used instead.
     *
     * @param stream records routed to HBase, keyed by target table so that
     *               table creation for one sink table happens in one subtask
     */
    private void writeToHbase(DataStream<Tuple2<JSONObject, TableProcess>> stream) {
        stream
            .keyBy(t -> t.f1.getSink_table())
            .addSink(FlinkSinkUtil.getPhoenixSink());
    }
    
    /**
     * Writes fact data to Kafka; the target topic per record comes from the
     * attached {@link TableProcess} config (resolved inside the sink).
     */
    private void writeToKafka(SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> stream) {
        stream.addSink(FlinkSinkUtil.getKafkaSink());
        
    }
    
    /**
     * Splits the stream into two branches based on the configured sink type:
     * the main output carries Kafka-bound records, a side output carries
     * HBase-bound records. Records with any other sink type are dropped.
     *
     * @return (kafkaStream, hbaseStream) pair
     */
    private Tuple2<SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>>, DataStream<Tuple2<JSONObject, TableProcess>>> dynamicSplit(SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> filteredStream) {
        OutputTag<Tuple2<JSONObject, TableProcess>> hbaseTag = new OutputTag<Tuple2<JSONObject, TableProcess>>("hbaseTag") {};
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> kafkaStream = filteredStream
            .keyBy(t -> t.f1.getSource_table())
            .process(new KeyedProcessFunction<String, Tuple2<JSONObject, TableProcess>, Tuple2<JSONObject, TableProcess>>() {
                @Override
                public void processElement(Tuple2<JSONObject, TableProcess> value,
                                           Context ctx, Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                    String sinkType = value.f1.getSink_type();
                    
                    if (Constant.DWD_SINK_KAFKA.equals(sinkType)) {
                        // fact data -> main output (Kafka)
                        out.collect(value);
                    } else if (Constant.DWD_SINK_HBASE.equals(sinkType)) {
                        // dimension data -> side output (HBase)
                        ctx.output(hbaseTag, value);
                    }
                }
            });
        
        return Tuple2.of(kafkaStream, kafkaStream.getSideOutput(hbaseTag));
        
    }
    
    /**
     * Keeps only the columns listed in the config's {@code sink_columns} for
     * each record; everything else is removed in place.
     */
    private SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> filterColumns(SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> stream) {
        return stream
            .map(new MapFunction<Tuple2<JSONObject, TableProcess>, Tuple2<JSONObject, TableProcess>>() {
                @Override
                public Tuple2<JSONObject, TableProcess> map(Tuple2<JSONObject, TableProcess> t) throws Exception {
                    JSONObject data = t.f0;
                    TableProcess tp = t.f1;
                    // comma separated list, e.g. "id,name,..."
                    String columns = tp.getSink_columns();
                    
                    // Split into exact column names. A plain columns.contains(column)
                    // substring test would wrongly keep e.g. key "id" when only
                    // "user_id" is configured.
                    Set<String> allowed = new HashSet<>(Arrays.asList(columns.split(",")));
                    data.keySet().removeIf(column -> !allowed.contains(column));
                    return t;
                }
            });
    }
    
    /**
     * Broadcasts the config stream and connects it with the keyed data stream.
     * Broadcast state maps "table:operationType" -> {@link TableProcess}; data
     * records look up their config there and records with no matching config
     * (tables that should not be sinked) are dropped.
     */
    private SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> connectStream(SingleOutputStreamOperator<JSONObject> dataStream,
                                                                                       SingleOutputStreamOperator<TableProcess> tpStream) {
        MapStateDescriptor<String, TableProcess> tpDesc =
            new MapStateDescriptor<>("tpState", String.class, TableProcess.class);
        // Turn the config stream into a broadcast stream:
        // table name + operation type identifies exactly one TableProcess,
        // e.g. "user_info:insert" -> TableProcess
        BroadcastStream<TableProcess> tpBCStream = tpStream
            .broadcast(tpDesc);
        // connect with the data stream
        return dataStream
            .keyBy(obj -> obj.getString("table")) // records of the same table go to the same group
            .connect(tpBCStream)
            .process(new KeyedBroadcastProcessFunction<String, JSONObject, TableProcess, Tuple2<JSONObject, TableProcess>>() {
                @Override
                public void processElement(JSONObject obj,
                                           ReadOnlyContext ctx,
                                           Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                    // look up this record's config in the broadcast state
                    ReadOnlyBroadcastState<String, TableProcess> tpState = ctx.getBroadcastState(tpDesc);
                    
                    String key = obj.getString("table") + ":" + obj.getString("type");
                    
                    TableProcess tp = tpState.get(key);
                    // tp may be null: tables that need no sink have no config row
                    if (tp != null) {
                        // forward only the "data" payload, everything else is dropped
                        out.collect(Tuple2.of(obj.getJSONObject("data"), tp));
                    }
                    
                }
                
                @Override
                public void processBroadcastElement(TableProcess tp,
                                                    Context ctx,
                                                    Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                    // store/update the config row in broadcast state
                    BroadcastState<String, TableProcess> tpState = ctx.getBroadcastState(tpDesc);
                    tpState.put(tp.getSource_table() + ":" + tp.getOperate_type(), tp);
                    
                }
            });
        
    }
    
    /**
     * Reads the routing configuration table from MySQL via Flink CDC and turns
     * it into a changelog stream of {@link TableProcess} rows (retractions are
     * filtered out, only upserts pass through).
     */
    private SingleOutputStreamOperator<TableProcess> readProcessTable(StreamExecutionEnvironment env) {
        StreamTableEnvironment tenv = StreamTableEnvironment.create(env);
        tenv.executeSql("create table table_process(" +
                            "   source_table string, " +
                            "   operate_type string, " +
                            "   sink_type string, " +
                            "   sink_table string, " +
                            "   sink_columns string, " +
                            "   sink_pk string, " +
                            "   sink_extend string," +
                            "   PRIMARY KEY (`source_table`,`operate_type`)NOT ENFORCED" +
                            ")with(" +
                            "   'connector' = 'mysql-cdc',\n" +
                            "   'hostname' = 'hadoop162',\n" +
                            "   'port' = '3306',\n" +
                            "   'username' = 'root',\n" +
                            "   'password' = 'aaaaaa',\n" +
                            "   'database-name' = 'gmall2021_realtime',\n" +
                            "   'table-name' = 'table_process', " +
                            // initial: take a snapshot of the whole table at job start,
                            //          then capture changes from the MySQL binlog
                            // never:   skip the snapshot, capture binlog changes only
                            "   'debezium.snapshot.mode' = 'initial' " +
                            ")");
        
        Table table = tenv.sqlQuery("select * from table_process");
        
        return tenv
            .toRetractStream(table, TableProcess.class)
            .filter(t -> t.f0) // keep only add/upsert messages, drop retractions
            .map(t -> t.f1);
        
    }
    
    /**
     * Parses the raw change-log strings and keeps only well-formed
     * insert/update records that carry a non-trivial data payload.
     */
    private SingleOutputStreamOperator<JSONObject> etl(DataStreamSource<String> stream) {
        return stream
            // normalize bootstrap types like "bootstrap-insert" to "insert";
            // replace() (literal) instead of replaceAll() (regex) — same result, no regex engine
            .map(data -> JSON.parseObject(data.replace("bootstrap-", "")))
            .filter(obj -> {
                String type = obj.getString("type");
                return obj.getString("database") != null
                    && obj.getString("table") != null
                    && obj.getString("data") != null
                    && obj.getString("data").length() > 10 // skip empty/degenerate payloads like "{}"
                    // constant-first equals: the original type.equals(...) NPE'd
                    // on records without a "type" field
                    && ("update".equals(type) || "insert".equals(type));
            });
    }
}
