package com.atguigu.realtime.app.dwd;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.realtime.app.BaseAppV1;
import com.atguigu.realtime.bean.TableProcess;
import com.atguigu.realtime.commont.Constant;
import com.atguigu.realtime.util.FlinkSinkUtil;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.state.BroadcastState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.state.ReadOnlyBroadcastState;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.KeyedProcessFunction;
import org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.util.Collector;
import org.apache.flink.util.OutputTag;

import java.util.Arrays;
import java.util.List;

/**
 * @Author lizhenchao@atguigu.cn
 * @Date 2021/11/23 14:02
 */
/**
 * DWD-layer dispatcher job: consumes raw business-table change records (ods_db topic),
 * cleans them, enriches each record with its routing config (read via Flink CDC from the
 * MySQL {@code table_process} table and broadcast), strips unwanted columns, and finally
 * routes every record either to a per-table Kafka topic (fact data) or to an HBase/Phoenix
 * table (dimension data).
 */
public class DwdDbApp extends BaseAppV1 {
    public static void main(String[] args) {
        // init(port, parallelism, checkpoint/group id, job name, source topic) — see BaseAppV1
        new DwdDbApp().init(2002, 1, "DwdDbApp", "DwdDbApp", Constant.TOPIC_ODS_DB);
    }
    
    @Override
    protected void run(StreamExecutionEnvironment env,
                       DataStreamSource<String> stream) {
        // 1. ETL the raw business data (parse JSON, keep only usable insert/update records)
        SingleOutputStreamOperator<JSONObject> eltedStream = etl(stream);
        
        // 2. Read the routing-config table as a changelog stream (Flink CDC)
        SingleOutputStreamOperator<TableProcess> tpStream = readProcessTable(env);
        // 3. Connect the data stream with the (broadcast) config stream
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> connectedStream = connect(eltedStream, tpStream);
        // 4. Drop columns that are not configured to be sunk
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> filteredStream = filterNoNeedColumns(connectedStream);
        // 5. Dynamic split: one stream goes to Kafka (different tables -> different topics),
        //    the other goes to HBase (different dimension tables -> different tables)
        Tuple2<DataStream<Tuple2<JSONObject, TableProcess>>, DataStream<Tuple2<JSONObject, TableProcess>>> kafkaHbaseStreams = dynamicSplit(filteredStream);
        // 6. Write the two streams to Kafka and HBase respectively
        writeToKafka(kafkaHbaseStreams.f0);
        writeToHbase(kafkaHbaseStreams.f1);
    }
    
    /**
     * Sinks dimension records to HBase via a custom Phoenix sink.
     * Keyed by sink table name so records for the same table land on the same subtask
     * (keyed state can then be used inside the sink, e.g. to create the table once).
     */
    private void writeToHbase(DataStream<Tuple2<JSONObject, TableProcess>> stream) {
        /*
        Table creation:
            1. Create the HBase tables up front, before any data arrives, OR
            2. Create them dynamically: when the first record of a dimension table
               arrives, create the table first.

        So the sink has two jobs:
            create the table

            insert the data

         Can we use JdbcSink?
            No! JdbcSink can only execute a single SQL statement,
            and we need both DDL (create) and DML (upsert).

         Hence the custom sink below.
         */
        stream
            .keyBy(t -> t.f1.getSink_table())
            .addSink(FlinkSinkUtil.getPhoenixSink());
    }
    
    /** Sinks fact records to Kafka; the sink picks the topic from each record's TableProcess. */
    private void writeToKafka(DataStream<Tuple2<JSONObject, TableProcess>> stream) {
        stream.addSink(FlinkSinkUtil.getKafkaSink());
    }
    
    /**
     * Splits the enriched stream into two by sink_type:
     * main output = Kafka-bound records, side output = HBase-bound records.
     * Records whose sink_type is neither value are silently dropped.
     *
     * @return Tuple2 of (kafkaStream, hbaseStream)
     */
    private Tuple2<DataStream<Tuple2<JSONObject, TableProcess>>, DataStream<Tuple2<JSONObject, TableProcess>>> dynamicSplit(SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> stream) {
        
        /*
        Dynamic split:
            two streams in total
                one bound for Kafka
                one bound for HBase

         Key the stream before process(): keyed state may be needed downstream.
            Keyed by sink table name.
         */
        // Anonymous subclass (note the trailing {}) so the OutputTag's generic type survives erasure.
        OutputTag<Tuple2<JSONObject, TableProcess>> hbaseTag = new OutputTag<Tuple2<JSONObject, TableProcess>>(Constant.SINK_TYPE_HBASE) {};
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> kafkaStream = stream
            .keyBy(t -> t.f1.getSink_table())
            .process(new KeyedProcessFunction<String, Tuple2<JSONObject, TableProcess>, Tuple2<JSONObject, TableProcess>>() {
                @Override
                public void processElement(Tuple2<JSONObject, TableProcess> t,
                                           Context ctx,
                                           Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                    String sinkType = t.f1.getSink_type();
                    if (Constant.SINK_TYPE_KAFKA.equals(sinkType)) {
                        // fact data -> main output (Kafka)
                        out.collect(t);
                    }else if(Constant.SINK_TYPE_HBASE.equals(sinkType)){
                        // dimension data -> side output (HBase)
                        ctx.output(hbaseTag, t);
                    }
                }
            });
    
        DataStream<Tuple2<JSONObject, TableProcess>> hbaseStream = kafkaStream.getSideOutput(hbaseTag);
        
        return Tuple2.of(kafkaStream, hbaseStream);
    
    }
    
    /**
     * Removes from each record's data object every column that is not listed in the
     * config's comma-separated sink_columns. Mutates the JSONObject in place and
     * re-emits the same tuple.
     */
    private SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> filterNoNeedColumns(SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> stream) {
        return stream.map(new MapFunction<Tuple2<JSONObject, TableProcess>, Tuple2<JSONObject, TableProcess>>() {
            @Override
            public Tuple2<JSONObject, TableProcess> map(Tuple2<JSONObject, TableProcess> t) throws Exception {
                JSONObject data = t.f0;
                List<String> sinkColumns = Arrays.asList(t.f1.getSink_columns().split(","));
                
                // keep only the configured columns (keySet view supports removal)
                data.keySet().removeIf(c -> !sinkColumns.contains(c));
                
                return t;
            }
        });
        
    }
    
    /**
     * Broadcast-joins the data stream with the config stream.
     * Each data record looks up its TableProcess config by "table:type";
     * records with no matching config are dropped (their tables need no real-time processing).
     * Only the record's "data" payload is kept in the output tuple.
     */
    private SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> connect(SingleOutputStreamOperator<JSONObject> dataStream,
                                                                                 SingleOutputStreamOperator<TableProcess> tpStream) {
        MapStateDescriptor<String, TableProcess> tpStateDesc = new MapStateDescriptor<>("tpState", String.class, TableProcess.class);
        /*
        For every record in the data stream, look up its routing config:
        the MySQL table name plus the operation type identifies exactly one TableProcess.
        Broadcast state layout:
            key                   value
            "user_info:insert"    TableProcess
         */
        // 1. Turn tpStream into a broadcast stream
        BroadcastStream<TableProcess> tpBcStream = tpStream.broadcast(tpStateDesc);
        
        // 2. Connect dataStream with the broadcast stream
        return dataStream
            .connect(tpBcStream)
            .process(new BroadcastProcessFunction<JSONObject, TableProcess, Tuple2<JSONObject, TableProcess>>() {
                @Override
                public void processElement(JSONObject value,
                                           ReadOnlyContext ctx,
                                           Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                    ReadOnlyBroadcastState<String, TableProcess> tpState = ctx.getBroadcastState(tpStateDesc);
                    
                    String key = value.getString("table") + ":" + value.getString("type");
                    // Some tables need no real-time analysis and have no config row, so tp is null here.
                    // NOTE(review): data records arriving before the broadcast config is populated
                    // are dropped too — confirm the 'initial' CDC snapshot makes this acceptable.
                    TableProcess tp = tpState.get(key);
                    if (tp != null) {
                        // keep only the "data" payload to simplify the downstream record
                        out.collect(Tuple2.of(value.getJSONObject("data"), tp));
                    }
                }
                
                @Override
                public void processBroadcastElement(TableProcess tp,
                                                    Context ctx,
                                                    Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                    // Store/refresh this config row in broadcast state (runs on every parallel subtask).
                    BroadcastState<String, TableProcess> tpState = ctx.getBroadcastState(tpStateDesc);
                    String key = tp.getSource_table() + ":" + tp.getOperate_type();
                    tpState.put(key, tp);
                }
            });
        
    }
    
    /**
     * Reads the routing-config table as a stream using the flink-mysql-cdc connector:
     * first a full snapshot, then continuous binlog tailing ('initial' startup mode).
     *
     * @return stream of TableProcess config rows (insert/update images only)
     */
    private SingleOutputStreamOperator<TableProcess> readProcessTable(StreamExecutionEnvironment env) {
        /*
        Flink CDC monitors MySQL data changes in real time, directly from the binlog.
         */
        StreamTableEnvironment tenv = StreamTableEnvironment.create(env);
        
        // NOTE(review): credentials are hard-coded in the DDL — move host/user/password to
        // external configuration before this leaves the classroom environment.
        tenv.executeSql("create table tp( " +
                            "   source_table string, " +
                            "   operate_type string, " +
                            "   sink_type string, " +
                            "   sink_table string, " +
                            "   sink_columns string, " +
                            "   sink_pk string, " +
                            "   sink_extend string, " +
                            "   primary key (source_table, operate_type) not enforced " +
                            ")with(" +
                            " 'connector' = 'mysql-cdc',\n" +
                            " 'hostname' = 'hadoop162',\n" +
                            " 'port' = '3306',\n" +
                            " 'username' = 'root',\n" +
                            " 'password' = 'aaaaaa',\n" +
                            " 'database-name' = 'gmall2021_realtime',\n" +
                            " 'table-name' = 'table_process', \n" +
                            "  'scan.startup.mode' = 'initial' " + // snapshot first, then tail the latest binlog
                            ")");
        Table tpTable = tenv.from("tp");
        // Retract stream: f0=true marks add records; deletes/retractions (f0=false) are discarded,
        // so config rows deleted in MySQL are never removed from broadcast state downstream.
        return tenv
            .toRetractStream(tpTable, TableProcess.class)
            .filter(t -> t.f0)
            .map(t -> t.f1);
        
    }
    
    /**
     * Basic cleansing of the raw change-log JSON:
     * parse, and keep only records that have database/table, a non-null data payload,
     * and type insert or update.
     */
    private SingleOutputStreamOperator<JSONObject> etl(DataStreamSource<String> stream) {
        return stream
            // Normalize Maxwell bootstrap types ("bootstrap-insert" -> "insert").
            // NOTE(review): replaceAll runs on the WHOLE raw string, so a literal
            // "bootstrap-" inside a data value would be mangled too — confirm acceptable.
            .map(data -> JSON.parseObject(data.replaceAll("bootstrap-", "")))
            .filter(obj ->
                        obj.getString("database") != null
                            && obj.getString("table") != null
                            && ("insert".equals(obj.getString("type")) || "update".equals(obj.getString("type")))
                            && obj.getJSONObject("data") != null
            );
    }
}
