package cn.doitedu.rtmk.demo11;

import cn.doitedu.rtmk.beans.RuleMetaBean;
import cn.doitedu.rtmk.beans.UserEvent;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.base.DeliveryGuarantee;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema;
import org.apache.flink.connector.kafka.sink.KafkaSink;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.types.Row;

import java.text.SimpleDateFormat;
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;
import java.util.Properties;

/**
 * @Author: deep as the sea
 * @Site: <a href="www.51doit.com">多易教育</a>
 * @QQ: 657270652
 * @Date: 2023/4/15
 * @Desc: Real-time marketing rule-engine job driver (demo 11).
 * This version solves the problem that rule "calculators" could not be restored
 * after a task failover/restart: every received rule meta record (RuleMetaBean)
 * must be kept in operator state so the calculators can be rebuilt on restart
 * (the actual state handling lives in {@code RuleEngineCoreFunction11}).
 **/
public class Demo11 {

    /**
     * Timestamp pattern of the rule meta table's {@code init_value_endtime} column.
     * DateTimeFormatter is immutable and thread-safe, so a single shared instance
     * replaces the per-record SimpleDateFormat allocation of the old code.
     */
    private static final DateTimeFormatter INIT_ENDTIME_FORMAT =
            DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss");

    public static void main(String[] args) throws Exception {

        // Build the execution environment (local, with web UI on port 8081)
        Configuration conf = new Configuration();
        conf.setInteger("rest.port", 8081);
        StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(conf);
        env.enableCheckpointing(2000, CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setCheckpointStorage("file:/d:/ckpt");
        env.getCheckpointConfig().setTolerableCheckpointFailureNumber(3);
        env.setParallelism(2);
        env.setStateBackend(new HashMapStateBackend());

        StreamTableEnvironment tenv = StreamTableEnvironment.create(env);


        // Kafka source for the real-time user behavior events
        KafkaSource<String> source = KafkaSource.<String>builder()
                .setBootstrapServers("doitedu:9092")
                .setStartingOffsets(OffsetsInitializer.latest())
                .setTopics("dwd-events-detail")
                .setClientIdPrefix("rtmk-")
                .setGroupId("g-rtmk-")
                .setValueOnlyDeserializer(new SimpleStringSchema())
                .build();

        // Use mysql-cdc to watch the rule management platform's rule meta table
        // (carries the rule's pre-selected audience as a BYTES column)
        tenv.executeSql(
                "CREATE TABLE rule_meta_mysql (    " +
                        "      rule_id BIGINT," +
                        "      rule_model_id INT," +
                        "      rule_model_calc_groovy_code STRING," +
                        "      pre_select_users BYTES,                 " +
                        "      rule_param_json STRING,                 " +
                        "      online_status  INT,                     " +
                        "      init_value_endtime  STRING,             " +
                        "     PRIMARY KEY (rule_id) NOT ENFORCED       " +
                        "     ) WITH (                                 " +
                        "     'connector' = 'mysql-cdc',               " +
                        "     'hostname' = 'doitedu'   ,               " +
                        "     'port' = '3306'          ,               " +
                        "     'username' = 'root'      ,               " +
                        "     'password' = 'root'      ,               " +
                        "     'database-name' = 'doit37',              " +
                        "     'table-name' = 'rule_meta_info'          " +
                        ")"
        );

        // Convert the changelog table to a stream, mapping each Row into our own bean
        DataStream<Row> ruleMetaRowStream = tenv.toChangelogStream(tenv.from("rule_meta_mysql"));
        SingleOutputStreamOperator<RuleMetaBean> ruleMetaBeanStream = ruleMetaRowStream.map(new MapFunction<Row, RuleMetaBean>() {
            @Override
            public RuleMetaBean map(Row row) throws Exception {

                RuleMetaBean ruleMetaBean = new RuleMetaBean();
                // changelog row kind (+I/-U/+U/-D) encoded as a byte
                ruleMetaBean.setRowKind(row.getKind().toByteValue());

                // rule id
                long rule_id = row.getFieldAs("rule_id");
                ruleMetaBean.setRule_id(rule_id);

                // pre-selected audience (serialized bytes; format decided upstream)
                byte[] pre_select_users = row.getFieldAs("pre_select_users");
                ruleMetaBean.setPre_select_users(pre_select_users);

                // rule model id
                int rule_model_id = row.getFieldAs("rule_model_id");
                ruleMetaBean.setRule_model_id(rule_model_id);

                // groovy source code of the calculator class for this rule model
                String rule_model_calc_groovy_code = row.getFieldAs("rule_model_calc_groovy_code");
                ruleMetaBean.setRule_model_calc_groovy_code(rule_model_calc_groovy_code);

                // parameters of this rule instance (JSON)
                String rule_param_json = row.getFieldAs("rule_param_json");
                ruleMetaBean.setRule_param_json(rule_param_json);

                // online/offline status of the rule
                int online_status = row.getFieldAs("online_status");
                ruleMetaBean.setOnline_status(online_status);

                // Cut-off time for the rule's cross-time-range initial-value statistics.
                // The column is nullable in CDC rows, so guard before parsing;
                // parsing uses the shared thread-safe formatter (system default zone,
                // matching the old SimpleDateFormat behavior).
                String init_value_endtime = row.getFieldAs("init_value_endtime");
                if (init_value_endtime != null) {
                    long endtimeMillis = LocalDateTime.parse(init_value_endtime, INIT_ENDTIME_FORMAT)
                            .atZone(ZoneId.systemDefault())
                            .toInstant()
                            .toEpochMilli();
                    ruleMetaBean.setInit_value_endtime(endtimeMillis);
                }

                return ruleMetaBean;
            }
        });

        // Broadcast the rule meta stream so every parallel subtask sees every rule
        MapStateDescriptor<Integer, RuleMetaBean> bstDesc = new MapStateDescriptor<Integer, RuleMetaBean>("bcRuleBean", Integer.class, RuleMetaBean.class);
        BroadcastStream<RuleMetaBean> ruleMetaBeanBroadcastStream = ruleMetaBeanStream.broadcast(bstDesc);


        /**
         * Read the user behavior events from Kafka and parse them into beans
         */
        DataStreamSource<String> eventStrStream = env.fromSource(source, WatermarkStrategy.noWatermarks(), "s");
        SingleOutputStreamOperator<UserEvent> eventStream = eventStrStream.map(json -> JSON.parseObject(json, UserEvent.class));

        /**
         * Rule-engine core processing: key the events by user id and connect them
         * with the broadcast rule meta stream
         */
        SingleOutputStreamOperator<JSONObject> resultStream =
                eventStream
                        .keyBy(UserEvent::getUser_id)
                        .connect(ruleMetaBeanBroadcastStream)
                        .process(new RuleEngineCoreFunction11());


        /**
         * Sink the rule match results to Kafka
         */
        Properties props = new Properties();
        // The Kafka transaction timeout must comfortably exceed the checkpoint
        // interval (2 s here), otherwise the broker aborts the producer transaction
        // before the committing checkpoint completes. The previous value of 3000 ms
        // was far too small for EXACTLY_ONCE; 15 min matches the broker-side default
        // ceiling (transaction.max.timeout.ms).
        props.setProperty("transaction.timeout.ms", "900000");
        KafkaSink<String> sink = KafkaSink.<String>builder()
                .setBootstrapServers("doitedu:9092")
                .setDeliveryGuarantee(DeliveryGuarantee.EXACTLY_ONCE)
                // EXACTLY_ONCE requires a transactional id prefix; without it the
                // sink builder fails at job startup.
                .setTransactionalIdPrefix("rtmk-tx-")
                .setKafkaProducerConfig(props)
                .setRecordSerializer(
                        KafkaRecordSerializationSchema.builder()
                                .setKeySerializationSchema(new SimpleStringSchema())
                                .setValueSerializationSchema(new SimpleStringSchema())
                                .setTopic("eagle-rtmk-messages")
                                .build())
                .build();

        resultStream
                .startNewChain()
                .map(obj -> JSON.toJSONString(obj))
                .startNewChain()
                .sinkTo(sink);

        env.execute();
    }

}
