package net.sina.realtime.traffic.controller;

import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import lombok.SneakyThrows;
import net.sina.realtime.traffic.bean.MonitorInfo;
import net.sina.realtime.traffic.schema.JSONDeserializationSchema;
import org.apache.flink.api.common.RuntimeExecutionMode;
import org.apache.flink.api.common.eventtime.SerializableTimestampAssigner;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.cep.CEP;
import org.apache.flink.cep.PatternSelectFunction;
import org.apache.flink.cep.PatternStream;
import org.apache.flink.cep.nfa.aftermatch.AfterMatchSkipStrategy;
import org.apache.flink.cep.pattern.Pattern;
import org.apache.flink.cep.pattern.conditions.SimpleCondition;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.jdbc.JdbcConnectionOptions;
import org.apache.flink.connector.jdbc.JdbcExecutionOptions;
import org.apache.flink.connector.jdbc.JdbcSink;
import org.apache.flink.connector.jdbc.JdbcStatementBuilder;
import org.apache.flink.shaded.guava18.com.google.common.cache.*;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;

import java.sql.*;
import java.time.Duration;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.TimeUnit;

public class _04DangerousDriverControllerFull {
    // Output record for a detected violation; written to MySQL by the JDBC sink below.
    // Lombok @Data generates getters/setters/equals/hashCode/toString; the two
    // constructor annotations provide both an all-args constructor (used when a
    // match is emitted) and a no-args constructor (keeps the class a valid Flink POJO).
    @Data
    @AllArgsConstructor
    @NoArgsConstructor
    public static class Violation{
        private int id;            // always 0 here — presumably DB-generated; it is not bound in the insert statement. TODO confirm schema
        private String car;        // license plate of the offending car
        private String violation;  // violation description text
        private Long createTime;   // detection time in epoch millis (System.currentTimeMillis() at match time)
    }

    /**
     * Dangerous-driving detection job:
     * Kafka (MonitorInfo JSON) -> enrich with per-monitor speed limit (MySQL via
     * Guava cache) -> event-time watermarks -> CEP: 3 over-speed events by the
     * same car within 2 minutes -> write a Violation row to MySQL.
     */
    public static void main(String[] args) throws Exception {
        // 1. env - set up the streaming environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setRuntimeMode(RuntimeExecutionMode.AUTOMATIC);
        env.setParallelism(1);

        Properties properties = new Properties();
        properties.setProperty("bootstrap.servers","node101:9092");
        properties.setProperty("group.id","g1");
        // 2. source - consume MonitorInfo records from Kafka, deserialized from JSON
        FlinkKafkaConsumer<MonitorInfo> kafkaConsumer = new FlinkKafkaConsumer<MonitorInfo>("topic-car",
                new JSONDeserializationSchema<>(MonitorInfo.class),properties);
        DataStreamSource<MonitorInfo> streamSource = env.addSource(kafkaConsumer);

        // Dimension-table join: enrich every record with the speed limit of its
        // monitor. A Guava LoadingCache sits in front of MySQL so each monitor_id
        // is looked up at most once per 10-minute window per task.
        SingleOutputStreamOperator<MonitorInfo> ds1 = streamSource.map(new RichMapFunction<MonitorInfo, MonitorInfo>() {

            Connection connection;
            PreparedStatement statement;

            // monitor_id -> speed_limit cache, lazily loaded from MySQL
            LoadingCache<String, Integer> cache;

            @Override
            public void open(Configuration parameters) throws Exception {
                cache = CacheBuilder.newBuilder()
                        // cap the number of entries; evicts least-recently-used beyond that (LRU)
                        .maximumSize(10)
                        // entries expire 10 minutes after being written; expiry is
                        // enforced lazily, on the next access of a stale key
                        .expireAfterWrite(10, TimeUnit.MINUTES)
                        .build(
                                // invoked on a cache miss; the result is stored automatically
                                new CacheLoader<String, Integer>() {
                                    @Override
                                    public Integer load(String monitorId) throws Exception {
                                        System.out.println("进入数据库查询啦。。。。。。。");
                                        statement.setString(1, monitorId);
                                        // FIX: close the ResultSet — the original leaked
                                        // one open cursor per cache miss
                                        try (ResultSet resultSet = statement.executeQuery()) {
                                            if (resultSet.next()) {
                                                System.out.println("进入到了if中.....");
                                                return resultSet.getInt("speed_limit");
                                            }
                                            // default limit when the monitor is unknown
                                            return 60;
                                        }
                                    }
                                });
                // open the MySQL connection used by the cache loader above
                connection = DriverManager.
                        getConnection("jdbc:mysql://node101:3306/flink_project","root","123456");
                statement = connection.prepareStatement("select speed_limit from t_monitor_info where  monitor_id = ? ");
            }

            @Override
            public void close() throws Exception {
                statement.close();
                connection.close();
            }

            @Override
            public MonitorInfo map(MonitorInfo car) throws Exception {
                MonitorInfo carInfo = new MonitorInfo();
                carInfo.setCar(car.getCar());
                // the actual enrichment: cache hit or, on miss, a MySQL lookup
                carInfo.setSpeedLimit(cache.get(car.getMonitorId()));
                carInfo.setSpeed(car.getSpeed());
                carInfo.setMonitorId(car.getMonitorId());
                carInfo.setAreaId(car.getAreaId());
                carInfo.setCameraId(car.getCameraId());
                carInfo.setActionTime(car.getActionTime());
                carInfo.setRoadId(car.getRoadId());

                return carInfo;
            }
        });

        // Event-time watermarks are mandatory here: CEP's within() works on event
        // time, so omitting this assignment makes the job fail.
        SingleOutputStreamOperator<MonitorInfo> ds2 = ds1.assignTimestampsAndWatermarks(
                WatermarkStrategy.<MonitorInfo>forBoundedOutOfOrderness(Duration.ofSeconds(5))
                        .withTimestampAssigner(new SerializableTimestampAssigner<MonitorInfo>() {
                            @SneakyThrows
                            @Override
                            public long extractTimestamp(MonitorInfo element, long recordTimestamp) {
                                // actionTime is in seconds; Flink expects epoch millis
                                return element.getActionTime()*1000;
                            }
                        })
        );

        // Requirement: flag a car that is over-speeding (>= 1.2x the limit)
        // 3 times within 2 minutes.
        // (The original comment here described "2 consecutive login failures" —
        // a copy-paste from another example; the pattern below is what runs.)
        Pattern<MonitorInfo, MonitorInfo> pattern = Pattern.<MonitorInfo>begin("first",
                        AfterMatchSkipStrategy.skipPastLastEvent())
                .where(new SimpleCondition<MonitorInfo>() {
                    @Override
                    public boolean filter(MonitorInfo monitorInfo) throws Exception {
                        // over-speed means exceeding 1.2x the limit
                        double limitSpeed = monitorInfo.getSpeedLimit();
                        if (limitSpeed == 0) {
                            limitSpeed = 60; // fall back to a default limit
                        }
                        return monitorInfo.getSpeed() >= limitSpeed * 1.2;
                    }
                }).times(3).within(Time.minutes(2));

        // 3. apply the pattern to the stream, keyed by car so that one car's
        // over-speed events are never counted against another car
        PatternStream<MonitorInfo> patternStream = CEP.pattern(ds2.keyBy(MonitorInfo::getCar), pattern);

        // 4. turn each full match into a Violation record
        SingleOutputStreamOperator<Violation> select = patternStream.select(new PatternSelectFunction<MonitorInfo, Violation>() {
            @Override
            public Violation select(Map<String, List<MonitorInfo>> map) throws Exception {
                System.out.println(map);
                // all 3 matched events share the same car; take the first
                MonitorInfo carInfo = map.get("first").get(0);
                return new Violation(0, carInfo.getCar(), "危险驾驶", System.currentTimeMillis());
            }
        });

        JdbcConnectionOptions jdbcConnectionOptions = new JdbcConnectionOptions.JdbcConnectionOptionsBuilder()
                .withUrl("jdbc:mysql://node101:3306/flink_project")
                .withDriverName("com.mysql.cj.jdbc.Driver")
                .withUsername("root")
                .withPassword("123456")
                .build();
        // NOTE(review): Violation has 4 fields but the insert binds only 3 and names
        // no columns — this relies on t_violation_list having exactly the 3 bound
        // columns (or an auto-generated id excluded some other way). An explicit
        // column list would be safer; confirm against the table schema.
        select.addSink(JdbcSink.sink(
                "insert into t_violation_list values(?,?,?)",
                new JdbcStatementBuilder<Violation>() {
                    @Override
                    public void accept(PreparedStatement preparedStatement, Violation violation) throws SQLException {
                        preparedStatement.setString(1,violation.getCar());
                        preparedStatement.setString(2,violation.getViolation());
                        preparedStatement.setLong(3,violation.getCreateTime());
                    }
                }, JdbcExecutionOptions.builder().withBatchSize(1).build()
                , jdbcConnectionOptions
        ));

        env.execute();
    }
}
