package broadcast;

/**
 * @author wangbh
 * @Description: StreamKafkaJoinPostgres
 * @date 2021/10/18 10:53
 */
/**
 * Purpose:
 * Read configuration rows from PostgreSQL into streamPgSql (rule data such as
 * code and name), then broadcast streamPgSql so that every task shares one
 * copy and memory consumption is reduced.
 *
 * Join the Kafka stream with the PostgreSQL configuration data and clean it
 * to produce the desired output.
 *
 * Broadcast replicates the state to every parallel task. Note that the state
 * is NOT propagated across tasks: a modification only takes effect in the
 * task that made it.
 */
import com.asap.demo.model.RuleBean;
import com.asap.demo.model.StandardEvent;
import com.asap.demo.sourcefunc.MysqlSourceFunction;
import com.asap.demo.sourcefunc.MysqlSourceFunction1;
import com.asap.demo.sourcefunc.MysqlSourceFunction2;
import com.asap.demo.utils.Constants;
import com.asap.demo.utils.Utils;
import com.asap.interf.Action;
import com.asap.interf.Event;
import com.asap.rule.config.ConfigCenter;
import com.asap.rule.engine.InferenceEngine;
import com.asap.rule.engine.PatternMatcher;
import com.asap.rule.orm.DbFetcher;
import com.asap.rule.util.RuleReader;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.common.state.BroadcastState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.state.ReadOnlyBroadcastState;
import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
import org.apache.flink.api.java.typeutils.ListTypeInfo;
import org.apache.flink.api.java.typeutils.MapTypeInfo;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.testutils.MiniClusterResourceConfiguration;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction;
import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.test.util.MiniClusterWithClientResource;
import org.apache.flink.api.java.tuple.Tuple5;
import org.apache.flink.util.Collector;
import org.junit.ClassRule;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.sql.Timestamp;
import java.util.*;

public class StreamKafkaJoinPostgres {
    private static final Logger logger = LoggerFactory.getLogger(StreamKafkaJoinPostgres.class);

    @ClassRule
    public static MiniClusterWithClientResource flinkCluster =
            new MiniClusterWithClientResource(
                    new MiniClusterResourceConfiguration.Builder()
                            .setNumberSlotsPerTaskManager(3)
                            .setNumberTaskManagers(2)
                            .build());

    @Test
    public void StreamKafkaJoinPostgres() throws Exception {

            final String topic = "web";
            final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
            env.setParallelism(1);
//        env.enableCheckpointing(5000);  //检查点 每5000ms
//        env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

//        final StreamTableEnvironment tenv = TableEnvironment.getTableEnvironment(env);
            Properties browseProperties = new Properties();
            browseProperties.put("bootstrap.servers", "192.168.1.25:9093");
            browseProperties.put("group.id", "temporal");
            browseProperties.put("auto.offset.reset", "latest");

            Map<String, String> configMap = new HashMap<String,String>();
            configMap.put(Constants.DB_JDBC_USER,"root");
            configMap.put(Constants.DB_JDBC_PASSWD,"1qazXSW@3edc");
            configMap.put(Constants.DB_JDBC_URL,"jdbc:mysql://192.168.1.234:3306/SSA?useUnicode=true&characterEncoding=utf-8");
            configMap.put(Constants.DB_JDBC_DRIVER,"com.mysql.jdbc.Driver");
            configMap.put(Constants.INITAL_POOL_SIZE,"10");
            configMap.put(Constants.MIN_POOL_SIZE,"5");
            configMap.put(Constants.MAX_IDLE_TIME,"50");
            configMap.put(Constants.MAX_STATE_ELEMENTS,"100");
            configMap.put(Constants.MAX_IDLE_TIME,"60");
            DbFetcher dbFetcher = new DbFetcher(configMap);
            List<String> listRule= RuleReader.readRules(dbFetcher);
            System.out.println("ListRule::"+listRule.size());
            //1、读取postgresQL的配置消息
          // DataStream<List<String>> streamPgSql = env.addSource(new MysqlSourceFunction1(dbFetcher));

            //1、读取postgresQL的配置消息
           DataStream<RuleBean> streamPgSql = env.addSource(new MysqlSourceFunction2());

            final DataStream <HashMap <String, RuleBean>> conf = streamPgSql.map(new MapFunction<RuleBean, HashMap <String, RuleBean>>() {
                @Override
                public HashMap <String, RuleBean> map(RuleBean value) throws Exception {
                    HashMap <String, RuleBean> hashMap = new HashMap <>();
                    hashMap.put(value.getRuleId(), value);
                    //System.out.println(value.getRuleId()+" : "+value);
                    return hashMap;
                }
            });

            //2、创建MapStateDescriptor规则，对广播的数据的数据类型的规则
            MapStateDescriptor <String,Map<String,RuleBean>> ruleStateDescriptor = new MapStateDescriptor<>("RulesBroadcastState"
                    , BasicTypeInfo.STRING_TYPE_INFO
                    ,new MapTypeInfo<>(String.class,RuleBean.class));
            //3、对conf进行broadcast返回BroadcastStream
            final BroadcastStream<HashMap <String, RuleBean>> confBroadcast = conf.broadcast(ruleStateDescriptor);

            DataStream<StandardEvent> kafkaData = env
                    .addSource(new FlinkKafkaConsumer<>(
                            "flink_pressure_test",
                            new SimpleStringSchema(),
                            browseProperties
                    ))
                    .assignTimestampsAndWatermarks(new BoundedOutOfOrdernessTimestampExtractor<String>(Time.minutes(1)) {
                        @Override
                        public long extractTimestamp(String element) {
                            return Timestamp.valueOf(Utils.transforDate(StandardEvent.parse(element).getField("CREATE_TIME"))).getTime();
                        }
                    }).map(new MapFunction<String, StandardEvent>() {
                        @Override
                        public StandardEvent map(String value) throws Exception {
                            StandardEvent standardEvent = StandardEvent.parse(value);
                            return standardEvent;
                        }
                    }).connect(confBroadcast)
                    .process(
                            new BroadcastProcessFunction<StandardEvent, HashMap<String, RuleBean>, StandardEvent>() {
                              private HashMap<String,RuleBean> ruleMap = new HashMap <>();

                                /**
                                 * open方法只会执行一次
                                 * 可以在这实现初始化的功能
                                 * 4、设置keyWords的初始值，否者会报错：java.lang.NullPointerException
                                 *
                                 * @param parameters
                                 * @throws Exception
                                 */
                                @Override
                                public void open(Configuration parameters) throws Exception {
                                    super.open(parameters);
                                }

                                @Override
                                public void processElement(StandardEvent standardEvent, ReadOnlyContext readOnlyContext, Collector<StandardEvent> collector) throws Exception {


                                }

                                @Override
                                public void processBroadcastElement(HashMap<String, RuleBean> stringRuleBeanHashMap, Context context, Collector<StandardEvent> collector) throws Exception {
                                  //  System.out.println("收到广播数据：" + stringRuleBeanHashMap.values());
                                    BroadcastState<String, Map<String, RuleBean>> broadcastState = context.getBroadcastState(ruleStateDescriptor);
                                    ruleMap.putAll(stringRuleBeanHashMap);

                                    stringRuleBeanHashMap.forEach((k,v)->{
                                        System.out.println("Item : " + k + " Count : " + v);
                                    });


//                                    //调用迭代器的经过集合实现的抽象方法遍历集合元素
//                                    while(broadcastState.entries().iterator().hasNext())
//                                    {
//                                        System.out.println("收到广播数据：" + broadcastState.entries().iterator().next().getKey());
//                                    }

                                }
                            }
                    );



//            //读取kafka中的stream
//            FlinkKafkaConsumer<String> Stream = new FlinkKafkaConsumer <>(topic, new SimpleStringSchema(), browseProperties);
//            webStream.setStartFromEarliest();
//            DataStream <String> kafkaData = env.addSource(webStream).setParallelism(1);
//            DataStream <Tuple5 <String, String, String, String, String>> map = kafkaData.map(new MapFunction <String, Tuple5 <String, String, String, String, String>>() {
//                @Override
//                public Tuple5 <String, String, String, String, String> map(String value) throws Exception {
//                    String[] tokens = value.split("\\t");
//                    return new Tuple5 <>(tokens[0], tokens[1], tokens[2], tokens[3], tokens[4]);
//                }
//            })
//                    //使用connect连接BroadcastStream，然后使用process对BroadcastConnectedStream流进行处理
//                    .connect(confBroadcast)
//                    .process(new BroadcastProcessFunction<Tuple5 <String, String, String, String, String>, HashMap <String, String>, Tuple5 <String, String, String, String, String>>() {
//                        private HashMap<String,String> keyWords = new HashMap <>();
//                        MapStateDescriptor <String,Map<String,String>> ruleStateDescriptor = new MapStateDescriptor <>("RulesBroadcastState"
//                                ,BasicTypeInfo.STRING_TYPE_INFO
//                                ,new MapTypeInfo<>(String.class,String.class));
//
//                        @Override
//                        public void open(Configuration parameters) throws Exception {
//                            super.open(parameters);
//                        }
//
//                        @Override
//                        public void processElement(Tuple5 <String, String, String, String, String> value, ReadOnlyContext ctx, Collector<Tuple5 <String, String, String, String, String>> out) throws Exception {
////                        Thread.sleep(10000);
//                            Map<String, String> map= ctx.getBroadcastState(ruleStateDescriptor).get("keyWords");
//                            String result = map.get(value.f3);
//                            if (result == null) {
//                                out.collect(new Tuple5 <>(value.f0, value.f1, value.f2, value.f3, value.f4));
//                            } else {
//                                out.collect(new Tuple5 <>(value.f0, value.f1, value.f2, result, value.f4));
//                            }
//
//                        }
//
//                        /**
//                         * 接收广播中的数据
//                         * @param value
//                         * @param ctx
//                         * @param out
//                         * @throws Exception
//                         */
//                        @Override
//                        public void processBroadcastElement(HashMap <String, String> value, Context ctx, Collector <Tuple5 <String, String, String, String, String>> out) throws Exception {
////                        System.out.println("收到广播数据："+value.values());
//                            BroadcastState<String, Map <String, String>> broadcastState = ctx.getBroadcastState(ruleStateDescriptor);
//                            keyWords.putAll(value);
//                            broadcastState.put("keyWords", keyWords);
//                        }
//                    });

         //   map.print();
            env.execute("Broadcast test kafka");
        }
    @Test
    public void StreamKafkaJoinPostgres1() throws Exception {

        final String topic = "web";
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
//        env.enableCheckpointing(5000);  //检查点 每5000ms
//        env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

//        final StreamTableEnvironment tenv = TableEnvironment.getTableEnvironment(env);
        Properties browseProperties = new Properties();
        browseProperties.put("bootstrap.servers", "192.168.1.25:9093");
        browseProperties.put("group.id", "temporal");
        browseProperties.put("auto.offset.reset", "latest");

        Map<String, String> configMap = new HashMap<String,String>();
        configMap.put(Constants.DB_JDBC_USER,"root");
        configMap.put(Constants.DB_JDBC_PASSWD,"1qazXSW@3edc");
        configMap.put(Constants.DB_JDBC_URL,"jdbc:mysql://192.168.1.234:3306/SSA?useUnicode=true&characterEncoding=utf-8");
        configMap.put(Constants.DB_JDBC_DRIVER,"com.mysql.jdbc.Driver");
        configMap.put(Constants.INITAL_POOL_SIZE,"10");
        configMap.put(Constants.MIN_POOL_SIZE,"5");
        configMap.put(Constants.MAX_IDLE_TIME,"50");
        configMap.put(Constants.MAX_STATE_ELEMENTS,"100");
        configMap.put(Constants.MAX_IDLE_TIME,"60");
        DbFetcher dbFetcher = new DbFetcher(configMap);
        List<String> listRule= RuleReader.readRules(dbFetcher);
        System.out.println("ListRule::"+listRule.size());
        //1、读取postgresQL的配置消息
        DataStream<List<String>> conf = env.addSource(new MysqlSourceFunction1(dbFetcher));

        //2、创建MapStateDescriptor规则，对广播的数据的数据类型的规则
        MapStateDescriptor <String,List<String>> ruleStateDescriptor = new MapStateDescriptor<>("RulesBroadcastState"
                , BasicTypeInfo.STRING_TYPE_INFO
                ,new ListTypeInfo<>(String.class));
        //3、对conf进行broadcast返回BroadcastStream
        final BroadcastStream<List<String>> confBroadcast = conf.broadcast(ruleStateDescriptor);

        DataStream<StandardEvent> kafkaData = env
                .addSource(new FlinkKafkaConsumer<>(
                        "flink_pressure_test",
                        new SimpleStringSchema(),
                        browseProperties
                ))
                .assignTimestampsAndWatermarks(new BoundedOutOfOrdernessTimestampExtractor<String>(Time.minutes(1)) {
                    @Override
                    public long extractTimestamp(String element) {
                        return Timestamp.valueOf(Utils.transforDate(StandardEvent.parse(element).getField("CREATE_TIME"))).getTime();
                    }
                }).map(new MapFunction<String, StandardEvent>() {
                    @Override
                    public StandardEvent map(String value) throws Exception {
                        StandardEvent standardEvent = StandardEvent.parse(value);
                        return standardEvent;
                    }
                }).connect(confBroadcast)
                .process(
                        new BroadcastProcessFunction<StandardEvent, List<String>, StandardEvent>() {
                            private List<String> listRule = new ArrayList<String>();
                            InferenceEngine engine =null;
                            @Override
                            public void processElement(StandardEvent standardEvent, ReadOnlyContext readOnlyContext, Collector<StandardEvent> collector) throws Exception {
                                PatternMatcher matcher = engine.matcher((Event) standardEvent);
                                if(matcher.find()) {
                                    List<Action> actions = matcher.getActions();
                                    for(Action action:actions){
                                        System.out.println("action"+action.getName());
                                    }
                                }else{
                                    System.out.println("action11111");
                                }
                            }

                            @Override
                            public void processBroadcastElement(List<String> strings, Context context, Collector<StandardEvent> collector) throws Exception {
                                System.out.println("++++++:"+strings.size());
                                this.engine = InferenceEngine.compile(RuleReader.parseRules(strings));
                            }
                        }
                );



//            //读取kafka中的stream
//            FlinkKafkaConsumer<String> Stream = new FlinkKafkaConsumer <>(topic, new SimpleStringSchema(), browseProperties);
//            webStream.setStartFromEarliest();
//            DataStream <String> kafkaData = env.addSource(webStream).setParallelism(1);
//            DataStream <Tuple5 <String, String, String, String, String>> map = kafkaData.map(new MapFunction <String, Tuple5 <String, String, String, String, String>>() {
//                @Override
//                public Tuple5 <String, String, String, String, String> map(String value) throws Exception {
//                    String[] tokens = value.split("\\t");
//                    return new Tuple5 <>(tokens[0], tokens[1], tokens[2], tokens[3], tokens[4]);
//                }
//            })
//                    //使用connect连接BroadcastStream，然后使用process对BroadcastConnectedStream流进行处理
//                    .connect(confBroadcast)
//                    .process(new BroadcastProcessFunction<Tuple5 <String, String, String, String, String>, HashMap <String, String>, Tuple5 <String, String, String, String, String>>() {
//                        private HashMap<String,String> keyWords = new HashMap <>();
//                        MapStateDescriptor <String,Map<String,String>> ruleStateDescriptor = new MapStateDescriptor <>("RulesBroadcastState"
//                                ,BasicTypeInfo.STRING_TYPE_INFO
//                                ,new MapTypeInfo<>(String.class,String.class));
//
//                        @Override
//                        public void open(Configuration parameters) throws Exception {
//                            super.open(parameters);
//                        }
//
//                        @Override
//                        public void processElement(Tuple5 <String, String, String, String, String> value, ReadOnlyContext ctx, Collector<Tuple5 <String, String, String, String, String>> out) throws Exception {
////                        Thread.sleep(10000);
//                            Map<String, String> map= ctx.getBroadcastState(ruleStateDescriptor).get("keyWords");
//                            String result = map.get(value.f3);
//                            if (result == null) {
//                                out.collect(new Tuple5 <>(value.f0, value.f1, value.f2, value.f3, value.f4));
//                            } else {
//                                out.collect(new Tuple5 <>(value.f0, value.f1, value.f2, result, value.f4));
//                            }
//
//                        }
//
//                        /**
//                         * 接收广播中的数据
//                         * @param value
//                         * @param ctx
//                         * @param out
//                         * @throws Exception
//                         */
//                        @Override
//                        public void processBroadcastElement(HashMap <String, String> value, Context ctx, Collector <Tuple5 <String, String, String, String, String>> out) throws Exception {
////                        System.out.println("收到广播数据："+value.values());
//                            BroadcastState<String, Map <String, String>> broadcastState = ctx.getBroadcastState(ruleStateDescriptor);
//                            keyWords.putAll(value);
//                            broadcastState.put("keyWords", keyWords);
//                        }
//                    });

        //   map.print();
        env.execute("Broadcast test kafka");
    }
}
