package com.atguigu.gmall.realtime.app.dwd;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.gmall.realtime.app.BaseAppV1;
import com.atguigu.gmall.realtime.bean.TableProcess;
import com.atguigu.gmall.realtime.common.Constant;
import com.atguigu.gmall.realtime.util.FlinkSinkUtil;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.state.BroadcastState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.state.ReadOnlyBroadcastState;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.util.Collector;
import org.apache.flink.util.OutputTag;

import java.io.Serializable;
import java.util.Arrays;
import java.util.List;

/**
 * @Author lzc
 * @Date 2022/3/16 14:01
 */
/**
 * DWD dispatcher for business (db) data.
 *
 * <p>Reads the raw Maxwell change stream from the ODS Kafka topic, cleans it,
 * connects it with a broadcast configuration stream (table_process, read via
 * flink-mysql-cdc), and dynamically routes every record either to a Kafka topic
 * (fact tables) or to HBase via Phoenix (dimension tables), keeping only the
 * columns configured for each sink table.
 *
 * <p>NOTE: the inner functions are deliberately anonymous classes, not lambdas —
 * Flink cannot extract generic type information from lambdas with generic
 * output types.
 */
public class DwdDbApp extends BaseAppV1 implements Serializable {
    
    public static void main(String[] args) {
        // args: port, parallelism, state/group id, job name, source topic
        new DwdDbApp().init(2002,
                            1,
                            "DwdDbApp",
                            "DwdDbApp",
                            Constant.TOPIC_ODS_DB
        );
    }
    
    @Override
    protected void handle(StreamExecutionEnvironment env,
                          DataStreamSource<String> stream) {
        // 1. ETL: drop dirty/irrelevant change records
        SingleOutputStreamOperator<JSONObject> etledStream = etl(stream);
        // 2. Read the routing configuration table (CDC: initial snapshot + binlog)
        SingleOutputStreamOperator<TableProcess> tpStream = readTableProcess(env);
        
        // 3. Connect data stream with the broadcast config stream for dynamic routing
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> connectedStream = connectStreams(
            etledStream, tpStream);
        
        // 4. Keep only the columns configured for each sink table
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> filterColumnsStream = filterColumns(
            connectedStream);
        
        // 5. Split: main stream -> Kafka (facts), side output -> HBase (dims)
        Tuple2<SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>>, DataStream<Tuple2<JSONObject, TableProcess>>> kafkaHbaseStreams =
            dynamicSplitStream(filterColumnsStream);
        
        // 6. Write each sub-stream to its dedicated sink
        writeToKafka(kafkaHbaseStreams.f0);
        writeToHbase(kafkaHbaseStreams.f1);
    }
    
    /**
     * Writes dimension records to HBase through Phoenix.
     *
     * <p>Phoenix tables are NOT created manually up front; the custom sink
     * creates a dimension table via JDBC the first time a record for it
     * arrives. Flink's built-in JDBC sink cannot be used here because it only
     * supports a single statement, while we need two (CREATE TABLE + UPSERT).
     */
    private void writeToHbase(DataStream<Tuple2<JSONObject, TableProcess>> stream) {
        stream
            // keyBy is required so the sink can use keyed state later
            // (e.g. to remember whether the Phoenix table was already created)
            .keyBy(t -> t.f1.getSink_table())
            .addSink(FlinkSinkUtil.getPhoenixSink());
    }
    
    /**
     * Writes fact records to Kafka; the target topic is taken from the
     * TableProcess config inside the sink.
     */
    private void writeToKafka(SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> stream) {
        stream
            .addSink(FlinkSinkUtil.getKafkaSink());
        
    }
    
    /**
     * Splits the stream by sink_type: the main stream carries Kafka-bound
     * (fact) records, the "hbase" side output carries Phoenix-bound (dim)
     * records. Records with any other sink_type are silently dropped.
     *
     * @return Tuple2 of (kafka stream, hbase side-output stream)
     */
    private Tuple2<SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>>, DataStream<Tuple2<JSONObject, TableProcess>>> dynamicSplitStream(
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> stream) {
        // anonymous subclass so the generic type survives erasure
        OutputTag<Tuple2<JSONObject, TableProcess>> hbaseTag =
            new OutputTag<Tuple2<JSONObject, TableProcess>>("hbase") {};
        
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> kafkaStream = stream
            .process(new ProcessFunction<Tuple2<JSONObject, TableProcess>, Tuple2<JSONObject, TableProcess>>() {
                @Override
                public void processElement(Tuple2<JSONObject, TableProcess> value,
                                           Context ctx,
                                           Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                    String sinkType = value.f1.getSink_type();
                    
                    if (Constant.SINK_KAFKA.equals(sinkType)) {
                        out.collect(value);
                        
                    } else if (Constant.SINK_HBASE.equals(sinkType)) {
                        ctx.output(hbaseTag, value);
                    }
                }
            });
        
        DataStream<Tuple2<JSONObject, TableProcess>> hbaseStream = kafkaStream.getSideOutput(hbaseTag);
        return Tuple2.of(kafkaStream, hbaseStream);
        
    }
    
    /**
     * Removes from each record every field that is not listed in the
     * configured sink_columns (comma-separated, e.g.
     * "id,activity_name,activity_type,...").
     */
    private SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> filterColumns(
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> stream) {
        return stream.map(new MapFunction<Tuple2<JSONObject, TableProcess>, Tuple2<JSONObject, TableProcess>>() {
            @Override
            public Tuple2<JSONObject, TableProcess> map(Tuple2<JSONObject, TableProcess> value) throws Exception {
                JSONObject data = value.f0;
                // all columns that should be sinked for this table
                List<String> columns = Arrays.asList(value.f1.getSink_columns().split(","));
                // keep a key only when it is one of the configured columns
                data.keySet().removeIf(key -> !columns.contains(key));
                
                return value;
            }
        });
        
    }
    
    /**
     * Broadcasts the config stream and connects it with the data stream.
     *
     * <p>Broadcast-state key: "sourceTable:operateType" (e.g.
     * "order_info:insert"). For each data record the matching config is looked
     * up; records whose table/type has no config are dropped (they are not
     * meant to be sinked). Only the "data" payload is kept — the Maxwell
     * envelope metadata is already reflected in the TableProcess config.
     */
    private SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> connectStreams(
        SingleOutputStreamOperator<JSONObject> dataStream,
        SingleOutputStreamOperator<TableProcess> tpStream) {
        
        MapStateDescriptor<String, TableProcess> tpStateDesc =
            new MapStateDescriptor<>("tpState",
                                     String.class,
                                     TableProcess.class
            );
        
        // 1. Turn the config stream into a broadcast stream
        BroadcastStream<TableProcess> bcStream = tpStream.broadcast(tpStateDesc);
        // 2. Connect the data stream with the broadcast stream
        return dataStream
            .connect(bcStream)
            // 3. Route each record using the broadcast state
            .process(new BroadcastProcessFunction<JSONObject, TableProcess, Tuple2<JSONObject, TableProcess>>() {
                // data-stream side: look up the config for this record
                @Override
                public void processElement(JSONObject value,
                                           ReadOnlyContext ctx,
                                           Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                    ReadOnlyBroadcastState<String, TableProcess> tpState = ctx.getBroadcastState(tpStateDesc);
                    /* incoming record shape (Maxwell):
                    {
                      "database": "gmall2022",
                      "table": "order_status_log",
                      "type": "insert",
                      "ts": 1647411239,
                      "xid": 6112,
                      "xoffset": 3995,
                      "data": {
                        "id": 69122,
                        "order_id": 26647,
                        "order_status": "1001",
                        "operate_time": "2022-03-16 14:13:59"
                      }
                    }
                     */
                    String key = value.getString("table") + ":" + value.getString("type");
                    TableProcess tp = tpState.get(key);
                    // no config for this table/type => the record is not sinked
                    if (tp != null) {
                        // the envelope metadata is covered by tp; keep only the payload
                        out.collect(Tuple2.of(value.getJSONObject("data"), tp));
                    }
                }
                
                // broadcast side: store/refresh each config row in broadcast state
                @Override
                public void processBroadcastElement(TableProcess value,
                                                    Context ctx,
                                                    Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                    BroadcastState<String, TableProcess> tpState = ctx.getBroadcastState(tpStateDesc);
                    
                    String key = value.getSource_table() + ":" + value.getOperate_type();
                    tpState.put(key, value);
                }
            });
        
        
    }
    
    /**
     * Reads the table_process routing configuration from MySQL via the
     * flink-mysql-cdc connector. snapshot.mode=initial: read the full table
     * first, then tail the binlog for changes. Retract deletions (f0 == false)
     * are filtered out, so removed configs stay in broadcast state until
     * overwritten — NOTE(review): config deletion is effectively ignored.
     */
    private SingleOutputStreamOperator<TableProcess> readTableProcess(StreamExecutionEnvironment env) {
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);
        
        tEnv.executeSql("CREATE TABLE `table_process` (" +
                            "  `source_table` string," +
                            "  `operate_type` string," +
                            "  `sink_type` string," +
                            "  `sink_table` string," +
                            "  `sink_columns` string," +
                            "  `sink_pk` string," +
                            "  `sink_extend` string," +
                            "  PRIMARY KEY (`source_table`,`operate_type`)not enforced" +
                            ")with(" +
                            " 'connector' = 'mysql-cdc'," +
                            " 'hostname' = 'hadoop162'," +
                            " 'port' = '3306'," +
                            " 'username' = 'root'," +
                            " 'password' = 'aaaaaa'," +
                            " 'database-name' = 'gmall2022_realtime'," +
                            " 'table-name' = 'table_process', " +
                            // first run reads the whole table, then switches to binlog tailing
                            " 'debezium.snapshot.mode'='initial'" +
                            ")");
        Table tp = tEnv.from("table_process");
        return tEnv
            .toRetractStream(tp, TableProcess.class)
            .filter(t -> t.f0)   // keep only add messages, drop retractions
            .map(t -> t.f1);
        
        
    }
    
    /**
     * ETL: parse and keep only clean, relevant change records —
     * database gmall2022, a non-null table name, type insert/update
     * (Maxwell bootstrap-insert is normalized to insert), and a non-empty
     * "data" payload (length > 2 rules out "{}").
     */
    private SingleOutputStreamOperator<JSONObject> etl(DataStreamSource<String> stream) {
        /* expected record shape (Maxwell):
            {
              "database": "gmall2022",
              "table": "order_status_log",
              "type": "insert",
              "ts": 1647410652,
              "xid": 297,
              "xoffset": 4040,
              "data": {
                "id": 69053,
                "order_id": 26623,
                "order_status": "1001",
                "operate_time": "2022-03-16 14:04:12"
              }
            }
         */
        return stream
            // literal replace (not replaceAll): no regex needed, and replaceAll
            // would compile a Pattern for every record
            .map(data -> JSON.parseObject(data.replace("bootstrap-", "")))
            .filter(obj ->
                        "gmall2022".equals(obj.getString("database"))
                            && obj.getString("table") != null
                            && ("insert".equals(obj.getString("type")) || "update".equals(obj.getString("type")))
                            && obj.getString("data") != null
                            && obj.getString("data").length() > 2
            );
        
    }
}
