package com.atguigu.gmall.realtime.app.dwd;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.gmall.realtime.app.BaseAppV1;
import com.atguigu.gmall.realtime.bean.TableProcess;
import com.atguigu.gmall.realtime.common.Constant;
import com.atguigu.gmall.realtime.util.FlinkSinkUtil;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.state.BroadcastState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.state.ReadOnlyBroadcastState;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.util.Collector;
import org.apache.flink.util.OutputTag;

import java.util.Arrays;
import java.util.List;


/**
 * @Author lzc
 * @Date 2022/4/17 9:19
 */
/**
 * DWD layer job for business (DB) data.
 *
 * <p>Pipeline: consume the ODS DB change-log topic from Kafka, clean it, join each
 * record against a dynamic routing-config table (read via MySQL CDC and distributed
 * as Flink broadcast state), strip columns that should not be sinked, then split the
 * stream: dimension-style rows go to Phoenix/HBase, fact-style rows go to Kafka.
 */
public class DwdDbApp extends BaseAppV1 {
    public static void main(String[] args) {
        new DwdDbApp().init("DwdDbApp",
                            2002,
                            1,
                            "DwdDbApp",
                            "DwdDbApp", Constant.TOPIC_ODS_DB
        );
    }
    
    @Override
    public void handle(StreamExecutionEnvironment env,
                       DataStreamSource<String> stream) {
        // 1. Clean the raw change-log records.
        SingleOutputStreamOperator<JSONObject> etledStream = etl(stream);
        // 2. Read the routing-config table (table_process) as a changelog stream.
        SingleOutputStreamOperator<TableProcess> tpStream = readProcessTable(env);
        // 3. Connect business data with the broadcast config so every record
        //    carries its matching TableProcess row.
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> dataTpStream = connect(etledStream, tpStream);
        // 4. Drop the columns that are not listed in sink_columns.
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> filteredStream = filterNoSinkColumns(dataTpStream);
        // 5. Dynamically split into a Kafka-bound stream (f0) and an HBase-bound stream (f1).
        Tuple2<DataStream<Tuple2<JSONObject, TableProcess>>, DataStream<Tuple2<JSONObject, TableProcess>>> kafkaHbaseStreams = dynamicSplit(filteredStream);
        
        // 6. Route each sub-stream to its storage.
        writeToKafka(kafkaHbaseStreams.f0);
        writeToPhoenix(kafkaHbaseStreams.f1);
    }
    
    /**
     * Writes dimension data into HBase through Phoenix.
     *
     * <p>The sink is custom (a plain {@code SinkFunction} from {@link FlinkSinkUtil})
     * because the JDBC sink supports only a single SQL statement, while this sink must
     * both auto-create the target table on first use and then upsert rows into it.
     * Records are keyed by target table name so each table's traffic stays on one
     * subtask (serializes the create-table-then-insert sequence per table).
     */
    private void writeToPhoenix(DataStream<Tuple2<JSONObject, TableProcess>> stream) {
        stream
            .keyBy(t -> t.f1.getSink_table())
            .addSink(FlinkSinkUtil.getPhoenixSink());
    }
    
    /** Writes fact data to Kafka; the target topic is resolved inside the sink from TableProcess. */
    private void writeToKafka(DataStream<Tuple2<JSONObject, TableProcess>> stream) {
        stream.addSink(FlinkSinkUtil.getKafkaSink());
    }
    
    /**
     * Dynamic split: main output carries Kafka-bound records, a side output carries
     * HBase-bound records, chosen by the sink_type of the matched config row.
     *
     * @return Tuple2 of (kafkaStream, hbaseStream)
     */
    private Tuple2<DataStream<Tuple2<JSONObject, TableProcess>>, DataStream<Tuple2<JSONObject, TableProcess>>> dynamicSplit(
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> stream) {
        OutputTag<Tuple2<JSONObject, TableProcess>> hbaseTag = new OutputTag<Tuple2<JSONObject, TableProcess>>("hbase") {};
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> kafkaStream = stream
            .process(new ProcessFunction<Tuple2<JSONObject, TableProcess>, Tuple2<JSONObject, TableProcess>>() {
                @Override
                public void processElement(Tuple2<JSONObject, TableProcess> value,
                                           Context ctx,
                                           Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                    String sinkType = value.f1.getSink_type();
                    if (Constant.SINK_KAFKA.equals(sinkType)) {
                        out.collect(value);
                    } else if (Constant.SINK_HBASE.equals(sinkType)) {
                        ctx.output(hbaseTag, value);
                    }
                    // Records with any other sink_type are intentionally discarded.
                }
            });
        
        DataStream<Tuple2<JSONObject, TableProcess>> hbaseStream = kafkaStream.getSideOutput(hbaseTag);
        
        return Tuple2.of(kafkaStream, hbaseStream);
    }
    
    /**
     * Removes from every record the key/value pairs whose keys are not listed in the
     * config row's sink_columns (a comma-separated column list). Mutates the
     * JSONObject in place and forwards the same tuple.
     */
    private SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> filterNoSinkColumns(
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> dataTpStream) {
        
        return dataTpStream.map(new MapFunction<Tuple2<JSONObject, TableProcess>, Tuple2<JSONObject, TableProcess>>() {
            @Override
            public Tuple2<JSONObject, TableProcess> map(Tuple2<JSONObject, TableProcess> t) throws Exception {
                JSONObject data = t.f0;
                String columns = t.f1.getSink_columns();
                // FIX: a missing/blank sink_columns previously threw an NPE; treat it
                // as "keep every column" and pass the record through untouched.
                if (columns == null || columns.trim().isEmpty()) {
                    return t;
                }
                // FIX: split on commas with optional surrounding whitespace so config
                // values like "id, user_name" no longer silently drop those columns
                // (the old bare split(",") kept the spaces and contains() failed).
                List<String> sinkColumns = Arrays.asList(columns.trim().split("\\s*,\\s*"));
                // Remove every entry whose key is not in the configured column list.
                data.keySet().removeIf(key -> !sinkColumns.contains(key));
                
                return t;
            }
        });
    }
    
    /**
     * Connects the business stream with the broadcast config stream.
     *
     * <p>The broadcast state is conceptually a map:
     * key   = "source_table:operate_type" (e.g. "user_info:insert"),
     * value = the matching TableProcess row.
     * Business records without a matching config entry are dropped (those tables are
     * not meant to be analyzed). Only the "data" payload of a matched record is kept.
     */
    private SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> connect(
        SingleOutputStreamOperator<JSONObject> dataStream,
        SingleOutputStreamOperator<TableProcess> tpStream) {
        MapStateDescriptor<String, TableProcess> tpStateDesc = new MapStateDescriptor<>("tpState", String.class, TableProcess.class);
        
        // 1. Turn the config stream into a broadcast stream.
        BroadcastStream<TableProcess> tpBcStream = tpStream
            .broadcast(tpStateDesc);
        
        // 2. Connect the data stream with the broadcast stream: one data record is
        //    matched with at most one config row.
        return dataStream
            .connect(tpBcStream)
            .process(new BroadcastProcessFunction<JSONObject, TableProcess, Tuple2<JSONObject, TableProcess>>() {
                @Override
                public void processElement(JSONObject value,
                                           ReadOnlyContext ctx,
                                           Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                    // 4. For each data record, look up its config in broadcast state.
                    String key = value.getString("table") + ":" + value.getString("type");
                    
                    ReadOnlyBroadcastState<String, TableProcess> tpState = ctx.getBroadcastState(tpStateDesc);
                    
                    // Tables with no config row are not analyzed: tp is null, record is dropped.
                    TableProcess tp = tpState.get(key);
                    if (tp != null) {
                        // Downstream only needs the "data" payload, not the full envelope.
                        out.collect(Tuple2.of(value.getJSONObject("data"), tp));
                    }
                }
                
                @Override
                public void processBroadcastElement(TableProcess value,
                                                    Context ctx,
                                                    Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                    // 3. Store each config row into broadcast state, keyed like "user_info:insert".
                    BroadcastState<String, TableProcess> tpState = ctx.getBroadcastState(tpStateDesc);
                    String key = value.getSource_table() + ":" + value.getOperate_type();
                    tpState.put(key, value);
                }
            });
    }
    
    /**
     * Reads the routing-config table via the MySQL CDC connector as a changelog stream.
     *
     * <p>With snapshot.mode=initial the connector first emits the full table snapshot,
     * then follows the binlog for subsequent changes.
     *
     * <p>NOTE(review): the retract-stream filter keeps only "add" messages (t.f0 == true),
     * so rows DELETED from table_process never reach (or get removed from) the broadcast
     * state while the job is running — confirm this limitation is acceptable.
     */
    private SingleOutputStreamOperator<TableProcess> readProcessTable(StreamExecutionEnvironment env) {
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);
        tEnv.executeSql("CREATE TABLE `table_process` (" +
                            "  `source_table` varchar," +
                            "  `operate_type` varchar," +
                            "  `sink_type` varchar," +
                            "  `sink_table` varchar," +
                            "  `sink_columns` varchar," +
                            "  `sink_pk` varchar," +
                            "  `sink_extend` varchar," +
                            "  PRIMARY KEY (`source_table`,`operate_type`) not enforced" +
                            ")with(" +
                            " 'connector' = 'mysql-cdc'," +
                            " 'hostname' = 'hadoop162'," +
                            " 'port' = '3306'," +
                            " 'username' = 'root'," +
                            " 'password' = 'aaaaaa'," +
                            " 'database-name' = 'gmall2022_realtime'," +
                            " 'table-name' = 'table_process', " +
                            // On startup read the full snapshot first, then follow the binlog.
                            " 'debezium.snapshot.mode' = 'initial'" +
                            ")");
        
        Table table = tEnv.from("table_process");
        // Convert the table to a retract stream and keep only the "add" side.
        return tEnv
            .toRetractStream(table, TableProcess.class)
            .filter(t -> t.f0)
            .map(t -> t.f1);
    }
    
    /**
     * Cleans the raw Kafka records: parses JSON and keeps only insert/update rows of
     * the gmall2022 database that carry a non-empty "data" payload (length > 2 skips
     * the empty object "{}").
     *
     * <p>NOTE(review): replaceAll("bootstrap-", "") rewrites the WHOLE raw string —
     * it normalizes Maxwell's "bootstrap-insert" type but would also alter any data
     * value containing "bootstrap-"; confirm that cannot occur in this dataset.
     */
    private SingleOutputStreamOperator<JSONObject> etl(DataStreamSource<String> stream) {
        return stream
            .map(data -> JSON.parseObject(data.replaceAll("bootstrap-", "")))
            .filter(obj ->
                        "gmall2022".equals(obj.getString("database"))
                            && obj.getString("table") != null
                            && ("insert".equals(obj.getString("type")) || "update".equals(obj.getString("type")))
                            && obj.getString("data") != null
                            && obj.getString("data").length() > 2
            );
    }
}
