package cn.doitedu.rtmk.demo7;

import cn.doitedu.rtmk.common.EventBean;
import com.alibaba.fastjson.JSON;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.*;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.types.Row;
import org.apache.flink.types.RowKind;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.roaringbitmap.longlong.Roaring64Bitmap;

import java.nio.ByteBuffer;


/**
 * Reworked handling of "static profile conditions" in the rule engine:
 * instead of querying HBase at runtime, the target audience is pre-selected
 * before a rule goes online and injected into the rule-engine operator as a
 * serialized {@link Roaring64Bitmap}.
 *
 * <p>Pipeline: Kafka user-behavior events (keyed by uid) are connected with a
 * broadcast stream of rule metadata captured from MySQL binlog via the
 * mysql-cdc connector, and processed by {@code EngineProcessFunction}.
 */
public class Demo7 {

    public static void main(String[] args) throws Exception {

        // Create the stream execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        // Enable exactly-once checkpointing every 5 s.
        env.enableCheckpointing(5000, CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setCheckpointStorage("file:/d:/ckpt");
        // FIX: the original timeout (2000 ms) was shorter than the checkpoint
        // interval (5000 ms); any checkpoint taking longer than 2 s would expire
        // and be declined. Use a generous timeout instead.
        env.getCheckpointConfig().setCheckpointTimeout(60000);
        // Keep operator state on the JVM heap.
        env.setStateBackend(new HashMapStateBackend());

        StreamTableEnvironment tenv = StreamTableEnvironment.create(env);


        /* *
         * 1. Read user behavior events from Kafka.
         */
        // Kafka source for the real-time user behavior topic.
        KafkaSource<String> source = KafkaSource.<String>builder()
                .setBootstrapServers("doitedu:9092")
                .setStartingOffsets(OffsetsInitializer.committedOffsets(OffsetResetStrategy.LATEST))
                .setGroupId("doit40-1")
                .setTopics("dwd_events")
                .setValueOnlyDeserializer(new SimpleStringSchema())
                .build();

        // Read the Kafka topic as a stream of raw JSON strings.
        DataStreamSource<String> eventsStr = env.fromSource(source, WatermarkStrategy.noWatermarks(), "s");

        // Parse each behavior-log line into an EventBean.
        SingleOutputStreamOperator<EventBean> beanStream = eventsStr.map(json -> JSON.parseObject(json, EventBean.class));


        // Route all events of the same user to the same subtask.
        KeyedStream<EventBean, Long> keyedEventStream = beanStream.keyBy(EventBean::getUid);


        /**
         * 2. Read the rule-metadata table's binlog with the mysql-cdc connector.
         */
        tenv.executeSql(
                "CREATE TABLE rule_meta_mysql (     " +
                        "      rule_id STRING,         " +
                        "      param_json STRING,      " +
                        "      pre_select_users bytes, " +
                        "      online_status int,      " +
                        "     PRIMARY KEY (rule_id) NOT ENFORCED  " +
                        "     ) WITH (                            " +
                        "     'connector' = 'mysql-cdc',          " +
                        "     'hostname' = 'doitedu'   ,          " +
                        "     'port' = '3306'          ,          " +
                        "     'username' = 'root'      ,          " +
                        "     'password' = 'root'      ,          " +
                        "     'database-name' = 'doit40',         " +
                        "     'table-name' = 'rule_meta'          " +
                        ")"
        );

        Table ruleMetaMysqlTable = tenv.from("rule_meta_mysql");
        // Each changelog row looks like: | -D | rule-01 | {.........} | <bitmap bytes> | 1 |
        DataStream<Row> ruleMetaCdcStream = tenv.toChangelogStream(ruleMetaMysqlTable);

        SingleOutputStreamOperator<RuleMetaBean> ruleMetaBeanStream =
                ruleMetaCdcStream.map(row -> {
                    String rule_id = row.getFieldAs("rule_id");
                    String param_json = row.getFieldAs("param_json");

                    byte[] pre_select_users = row.getFieldAs("pre_select_users");
                    int online_status = row.getFieldAs("online_status");

                    // Change kind of this CDC record (+I / -U / +U / -D).
                    RowKind kind = row.getKind();
                    String opStr = kind.shortString();


                    // Deserialize the pre-selected audience bitmap captured by CDC.
                    // FIX: guard against a NULL column value (e.g. a rule row written
                    // before pre-selection ran) — the original code NPE'd here and
                    // failed the whole job; fall back to an empty bitmap instead.
                    Roaring64Bitmap bitmap = Roaring64Bitmap.bitmapOf();
                    if (pre_select_users != null) {
                        bitmap.deserialize(ByteBuffer.wrap(pre_select_users));
                    }

                    return new RuleMetaBean(opStr, rule_id, param_json, bitmap, online_status);
                });


        // Broadcast the rule-metadata stream to all subtasks.
        // NOTE(review): the descriptor is <String, String> while the stream carries
        // RuleMetaBean — presumably EngineProcessFunction stores serialized rules;
        // verify against that class.
        MapStateDescriptor<String, String> bcStateDesc = new MapStateDescriptor<>("bc_state", String.class, String.class);
        BroadcastStream<RuleMetaBean> ruleMetaBeanBroadcastStream = ruleMetaBeanStream.broadcast(bcStateDesc);

        /**
         *  3. Connect the broadcast rule-metadata stream with the keyed event stream.
         */
        SingleOutputStreamOperator<String> res = keyedEventStream
                .connect(ruleMetaBeanBroadcastStream)
                .process(new EngineProcessFunction());

        res.print();

        env.execute();

    }
}
