package com.atguigu.realtime.app.dwd;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.realtime.app.BaseAppV1;
import com.atguigu.realtime.bean.TableProcess;
import com.atguigu.realtime.common.Constant;
import com.atguigu.realtime.util.FlinkSinkUtil;

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.state.BroadcastState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.state.ReadOnlyBroadcastState;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.util.Collector;
import org.apache.flink.util.OutputTag;

import java.util.Arrays;
import java.util.List;

/**
 * @Author lizhenchao@atguigu.cn
 * @Date 2022/2/9 14:02
 */
/**
 * DWD-layer dynamic routing job.
 *
 * <p>Consumes business-table changelog records from the ODS Kafka topic and routes
 * each record to its sink according to rules stored in the MySQL table
 * {@code table_process}: fact-table data is written back to Kafka (one topic per
 * table), dimension-table data is written to HBase via Phoenix (one table each).
 * The rules are ingested with Flink CDC and broadcast, so rule changes take effect
 * without restarting the job.
 *
 * @Author lizhenchao@atguigu.cn
 * @Date 2022/2/9 14:02
 */
public class DwdDbApp extends BaseAppV1 {
    public static void main(String[] args) {
        // Args presumably: web-UI port, parallelism, job/checkpoint id, consumer
        // group, source topic — TODO confirm against BaseAppV1.init
        new DwdDbApp().init(2002, 1, "DwdDbApp", "DwdDbApp2", Constant.TOPIC_ODS_DB);
    }
    
    @Override
    public void run(StreamExecutionEnvironment env,
                    DataStreamSource<String> stream) {
        // 1. Filter out dirty/unusable records from the raw business data
        SingleOutputStreamOperator<JSONObject> filteredStream = filterStream(stream);
        // 2. Read the routing configuration (MySQL CDC)
        SingleOutputStreamOperator<TableProcess> tpStream = readTableProcess(env);
        // 3. Broadcast the configuration and connect it with the data stream
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> dataTpStream = connect(filteredStream, tpStream);
        // 4. Strip the columns the sink does not want
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> columnsClearedStream = filterNotNeedColumns(dataTpStream);
        // 5. Dynamic split: one stream bound for Kafka (several topics),
        //    one bound for HBase (several tables)
        Tuple2<DataStream<Tuple2<JSONObject, TableProcess>>, DataStream<Tuple2<JSONObject, TableProcess>>> kafkaHbaseStreams
            = dynamicSplit(columnsClearedStream);
        // 6. Write each stream to its sink
        write2Kafka(kafkaHbaseStreams.f0);
        write2Hbase(kafkaHbaseStreams.f1);
    }
    
    /**
     * Writes dimension data to HBase through Phoenix.
     *
     * <p>Why a custom sink: there is no ready-made Phoenix sink, and the JDBC sink
     * can only execute one fixed SQL statement, whereas we need two — an automatic
     * CREATE TABLE when a dimension table is first seen, plus the data UPSERT.
     * {@code FlinkSinkUtil.getPhoenixSink()} encapsulates both.
     *
     * @param stream (data, rule) pairs whose rule routes them to HBase
     */
    private void write2Hbase(DataStream<Tuple2<JSONObject, TableProcess>> stream) {
        stream
            // route all records of the same sink table to the same subtask
            .keyBy(t -> t.f1.getSink_table())
            .addSink(FlinkSinkUtil.getPhoenixSink());
    }
    
    /**
     * Writes fact data to Kafka.
     *
     * <p>The keyBy is not strictly required; it guarantees that records of the same
     * sink topic land in the same parallel subtask, which matters if the sink
     * function ever relies on keyed state.
     *
     * @param stream (data, rule) pairs whose rule routes them to Kafka
     */
    private void write2Kafka(DataStream<Tuple2<JSONObject, TableProcess>> stream) {
        stream
            .keyBy(t -> t.f1.getSink_table())
            .addSink(FlinkSinkUtil.getKafkaSink());
    }
    
    /**
     * Splits the stream by sink type: Kafka-bound records stay in the main stream,
     * HBase-bound records go to a side output. Records whose sink_type is neither
     * {@code SINK_KAFKA} nor {@code SINK_HBASE} are silently dropped.
     *
     * @return a tuple (kafkaStream, hbaseStream)
     */
    private Tuple2<DataStream<Tuple2<JSONObject, TableProcess>>, DataStream<Tuple2<JSONObject, TableProcess>>> dynamicSplit(SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> stream) {
        // Anonymous subclass so the generic type survives erasure
        OutputTag<Tuple2<JSONObject, TableProcess>> hbaseTag = new OutputTag<Tuple2<JSONObject, TableProcess>>("hbase") {};
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> kafkaStream = stream.process(new ProcessFunction<Tuple2<JSONObject, TableProcess>, Tuple2<JSONObject, TableProcess>>() {
            @Override
            public void processElement(Tuple2<JSONObject, TableProcess> value,
                                       Context ctx,
                                       Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                String sinkType = value.f1.getSink_type();
                if (Constant.SINK_KAFKA.equals(sinkType)) {
                    out.collect(value);                 // main stream -> Kafka
                } else if (Constant.SINK_HBASE.equals(sinkType)) {
                    ctx.output(hbaseTag, value);        // side output -> HBase
                }
            }
        });
        
        DataStream<Tuple2<JSONObject, TableProcess>> hbaseStream = kafkaStream.getSideOutput(hbaseTag);
        
        return Tuple2.of(kafkaStream, hbaseStream);
    }
    
    /**
     * Removes from each data object every column that is not listed in the rule's
     * comma-separated {@code sink_columns} (e.g.
     * {@code id,activity_name,activity_type,...}). The JSONObject is mutated in
     * place and the same tuple is forwarded.
     */
    private SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> filterNotNeedColumns(SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> dataTpStream) {
        return dataTpStream.map(new MapFunction<Tuple2<JSONObject, TableProcess>, Tuple2<JSONObject, TableProcess>>() {
            @Override
            public Tuple2<JSONObject, TableProcess> map(Tuple2<JSONObject, TableProcess> t) throws Exception {
                JSONObject data = t.f0;
                TableProcess tp = t.f1;
                // List view of the wanted columns, for membership tests
                List<String> columns = Arrays.asList(tp.getSink_columns().split(","));
                
                // removeIf removes safely while iterating — never remove entries
                // inside a plain for-each loop
                data.keySet().removeIf(c -> !columns.contains(c));
                
                return t;
            }
        });
    }
    
    /**
     * Broadcasts the configuration stream and connects it with the data stream,
     * emitting a (data, rule) tuple for every data record that has a matching rule.
     * Records of tables without a rule are dropped (they are not needed for
     * real-time analysis).
     */
    private SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> connect(SingleOutputStreamOperator<JSONObject> dataStream,
                                                                                 SingleOutputStreamOperator<TableProcess> tpStream) {
        // Broadcast-state descriptor. Key format: "<table>:<type>", e.g. "spu_info:insert"
        MapStateDescriptor<String, TableProcess> tpStateDesc = new MapStateDescriptor<>("tpState", String.class, TableProcess.class);
        // 1. Turn the configuration stream into a broadcast stream
        BroadcastStream<TableProcess> tpBCStream = tpStream.broadcast(tpStateDesc);
        // 2. Connect the data stream with the broadcast stream
        return dataStream
            .connect(tpBCStream)
            .process(new BroadcastProcessFunction<JSONObject, TableProcess, Tuple2<JSONObject, TableProcess>>() {
                
                /**
                 * Handles a business-data record, e.g.
                 * {"database":"gmall2021","table":"spu_info","type":"insert",
                 *  "ts":1644387417,"data":{"id":10,...}}
                 */
                @Override
                public void processElement(JSONObject value,
                                           ReadOnlyContext ctx,
                                           Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                    // Look up the rule for this record in the broadcast state
                    ReadOnlyBroadcastState<String, TableProcess> tpState = ctx.getBroadcastState(tpStateDesc);
                    String key = value.getString("table") + ":" + value.getString("type");
                    // tp may be null: tables that are not analysed in real time
                    // have no rule
                    TableProcess tp = tpState.get(key);
                    if (tp != null) {
                        // Forward only the payload — the changelog metadata is no
                        // longer needed downstream
                        out.collect(Tuple2.of(value.getJSONObject("data"), tp));
                    }
                }
                
                /** Handles a configuration record: store it in the broadcast state. */
                @Override
                public void processBroadcastElement(TableProcess tp,
                                                    Context ctx,
                                                    Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                    BroadcastState<String, TableProcess> tpState = ctx.getBroadcastState(tpStateDesc);
                    String key = tp.getSource_table() + ":" + tp.getOperate_type();
                    tpState.put(key, tp);
                }
            });
    }
    
    /**
     * Reads the routing-rule table {@code table_process} with the MySQL CDC
     * connector and converts it to an append-style stream of {@link TableProcess}
     * beans (retract messages with flag=false, i.e. the "before" side of updates,
     * are filtered out).
     */
    private SingleOutputStreamOperator<TableProcess> readTableProcess(StreamExecutionEnvironment env) {
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);
        
        // SECURITY NOTE(review): DB credentials are hardcoded here — move them to
        // external configuration; do not commit passwords to source control.
        tEnv.executeSql("CREATE TABLE `table_process` (" +
                            "  `source_table` string," +
                            "  `operate_type` string," +
                            "  `sink_type` string," +
                            "  `sink_table` string," +
                            "  `sink_columns` string," +
                            "  `sink_pk` string," +
                            "  `sink_extend` string, " +
                            "  PRIMARY KEY (`source_table`,`operate_type`) not enforced" +
                            ")with(" +
                            "  'connector' = 'mysql-cdc'," +
                            "  'hostname' = 'hadoop162'," +
                            "  'port' = '3306'," +
                            "  'username' = 'root'," +
                            "  'password' = 'aaaaaa'," +
                            "  'database-name' = 'gmall2021_realtime'," +
                            "  'table-name' = 'table_process', " +
                            // snapshot.mode=initial: on first start, read the full
                            // table, then follow the binlog for changes
                            "   'debezium.snapshot.mode' = 'initial'" +
                            ")");
        
        Table tp = tEnv.from("table_process");
        return tEnv
            .toRetractStream(tp, TableProcess.class)
            .filter(t -> t.f0)   // keep only add messages
            .map(t -> t.f1);
    }
    
    /**
     * Parses the raw changelog JSON and filters out dirty data. A record survives
     * only if it parses, belongs to database {@code gmall2021}, names a table, is
     * an insert or update, and carries a {@code data} payload.
     *
     * <p>Sample record:
     * {"database":"gmall2021","table":"cart_info","type":"insert","ts":1644386718,
     *  "data":{"id":147933,"user_id":"739",...}}
     *
     * <p>Bootstrap records ("bootstrap-insert") are normalized to plain inserts so
     * the historical dump flows through the same path.
     */
    private SingleOutputStreamOperator<JSONObject> filterStream(DataStreamSource<String> stream) {
        return stream
            .flatMap(new FlatMapFunction<String, JSONObject>() {
                @Override
                public void flatMap(String data, Collector<JSONObject> out) {
                    try {
                        // literal replacement, no regex needed
                        out.collect(JSON.parseObject(data.replace("bootstrap-insert", "insert")));
                    } catch (Exception ignored) {
                        // Malformed JSON: deliberately skip the record instead of
                        // failing the whole job on one bad message
                    }
                }
            })
            .filter(obj ->
                        "gmall2021".equals(obj.getString("database"))
                            && obj.getString("table") != null
                            && ("insert".equals(obj.getString("type")) || "update".equals(obj.getString("type")))
                            && obj.getJSONObject("data") != null
            );
    }
}
