package flink.scene.mysqlCdc;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.ververica.cdc.connectors.mysql.source.MySqlSource;
import com.ververica.cdc.connectors.mysql.table.StartupOptions;
import flink.http_connect.R;
import flink.mysql_connect.cdc.CdcDataVO;
import flink.mysql_connect.cdc.MysqlDataParseSchema;
import io.debezium.data.Envelope;
import lombok.extern.slf4j.Slf4j;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.FilterFunction;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.AsyncDataStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.async.ResultFuture;
import org.apache.flink.streaming.api.functions.async.RichAsyncFunction;
import org.apache.flink.streaming.connectors.redis.RedisSink;
import org.apache.flink.streaming.connectors.redis.common.config.FlinkJedisPoolConfig;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisCommand;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisCommandDescription;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisMapper;
import org.apache.flink.util.Collector;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.NameValuePair;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.entity.UrlEncodedFormEntity;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.concurrent.FutureCallback;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
import org.apache.http.impl.nio.client.HttpAsyncClients;
import org.apache.http.message.BasicNameValuePair;
import org.apache.http.util.EntityUtils;

import java.time.LocalDateTime;
import java.util.*;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

@Slf4j
public class DeviceCount {

    /**
     * Streaming job: maintains a running count of active devices per
     * (parkId, deviceType) from MySQL CDC events on
     * {@code smart_park_ex.device_info} and pushes each updated count to a
     * dashboard HTTP endpoint through an async operator.
     *
     * Pipeline: MySQL binlog source -> filter (count-relevant changes only)
     * -> flatMap to (parkId, deviceType, delta) -> keyBy(parkId, deviceType)
     * -> running sum -> async HTTP push.
     */
    public static void main(String[] args) throws Exception {
        // Pin the JVM time zone so local date handling is deterministic.
        TimeZone.setDefault(TimeZone.getTimeZone("Asia/Shanghai"));

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Checkpoint every 3s with exactly-once semantics.
        env.enableCheckpointing(3000);
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        // Abort any checkpoint that does not complete within 10s.
        env.getCheckpointConfig().setCheckpointTimeout(10000L);
        // Allow at most two checkpoints in flight at the same time.
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(2);
        // Leave at least 1s between the end of one checkpoint and the start of the next.
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(1000);
        // Retain externalized checkpoints on cancel so the job can be resumed from them.
        env.getCheckpointConfig().setExternalizedCheckpointCleanup(
                CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        // Tell Debezium not to lock tables while taking the initial snapshot.
        Properties properties = new Properties();
        properties.setProperty("debezium.snapshot.locking.mode", "none");

        MySqlSource<CdcDataVO> mySqlSource = MySqlSource.<CdcDataVO>builder()
                .hostname("192.168.20.130")
                .port(3306)
                .username("root")
                .password("root_1234")
                // BUG FIX: serverTimeZone was set twice ("Asia/Shanghai", then
                // "UTC" further down the chain) and the second call silently
                // won. Keep a single value consistent with the JVM default above.
                .serverTimeZone("Asia/Shanghai")
                .databaseList("smart_park_ex")
                .tableList("smart_park_ex.device_info")
                .debeziumProperties(properties)
                // Custom deserializer producing CdcDataVO records.
                .deserializer(new MysqlDataParseSchema())
                // initial(): full snapshot first, then continue from the binlog.
                // Alternatives: earliest(), latest(), specificOffset(...), timestamp(...).
                .startupOptions(StartupOptions.initial())
                .build();

        SingleOutputStreamOperator<Tuple3<String, String, Integer>> dataStream =
                env.fromSource(mySqlSource, WatermarkStrategy.noWatermarks(), "mysql-binlog-data")
                        // Drop events that cannot change any device count.
                        .filter((FilterFunction<CdcDataVO>) DeviceCount::isRelevantChange)
                        // Turn each event into a (parkId, deviceType, +1/-1/0) delta.
                        .flatMap(new FlatMapFunction<CdcDataVO, Tuple3<String, String, Integer>>() {
                            @Override
                            public void flatMap(CdcDataVO data, Collector<Tuple3<String, String, Integer>> out) {
                                emitCountDelta(data, out);
                            }
                        })
                        // Key by (parkId, deviceType) and keep a running sum of the deltas.
                        .keyBy(new KeySelector<Tuple3<String, String, Integer>, Tuple2<String, String>>() {
                            @Override
                            public Tuple2<String, String> getKey(Tuple3<String, String, Integer> tuple3) {
                                return Tuple2.of(tuple3.f0, tuple3.f1);
                            }
                        })
                        .sum(2);

        // Push every updated count to the dashboard without blocking the pipeline.
        SingleOutputStreamOperator<R> result = AsyncDataStream.orderedWait(
                dataStream,                 // input stream
                new AsyncHttpFunction(),    // async HTTP push
                2000,                       // per-request timeout
                TimeUnit.MILLISECONDS,      // timeout unit
                10);                        // max in-flight async requests

        // Surface the HTTP responses instead of discarding them; the result
        // stream previously had no consumer at all.
        result.print("http-result");

        dataStream.print();
        env.execute("deviceTypeJob");
    }

    /**
     * Decides whether a CDC event can affect a device count.
     * READ/CREATE and DELETE events are relevant when the row image has a
     * CREATE_DATE; UPDATE events only when the STS (status) column actually
     * changed value.
     */
    private static boolean isRelevantChange(CdcDataVO data) {
        Envelope.Operation type = data.getOperatorType();
        Map<String, Object> afterMap = data.getAfterMap();
        Map<String, Object> beforeMap = data.getBeforeMap();
        if (Envelope.Operation.READ.equals(type) || Envelope.Operation.CREATE.equals(type)) {
            return afterMap.get("CREATE_DATE") != null;
        }
        if (Envelope.Operation.DELETE.equals(type)) {
            return beforeMap.get("CREATE_DATE") != null;
        }
        if (Envelope.Operation.UPDATE.equals(type) && afterMap.get("CREATE_DATE") != null) {
            // Only status flips matter for the running count.
            return afterMap.get("STS") != null && !afterMap.get("STS").equals(beforeMap.get("STS"));
        }
        return false;
    }

    /**
     * Converts one CDC event into a count delta keyed by (parkId, deviceType).
     * STS == "A" marks an active device: READ/CREATE of an active row adds 1,
     * DELETE of an active row subtracts 1, and an UPDATE adds 1 when the row
     * became active or subtracts 1 otherwise.
     */
    private static void emitCountDelta(CdcDataVO data, Collector<Tuple3<String, String, Integer>> out) {
        Envelope.Operation type = data.getOperatorType();
        boolean isDelete = Envelope.Operation.DELETE.equals(type);
        // DELETE events only carry the old row image; everything else carries the new one.
        Map<String, Object> row = isDelete ? data.getBeforeMap() : data.getAfterMap();
        Object sts = row.get("STS");
        Object parkId = row.get("PARK_ID");
        Object deviceType = row.get("DEVICE_TYPE");
        // Guard every field we stringify instead of risking an NPE on partial rows
        // (the original only checked STS before calling toString() on the others).
        if (sts == null || parkId == null || deviceType == null) {
            return;
        }
        boolean active = "A".equals(sts.toString());
        int delta;
        if (Envelope.Operation.READ.equals(type) || Envelope.Operation.CREATE.equals(type)) {
            delta = active ? 1 : 0;
        } else if (isDelete) {
            delta = active ? -1 : 0;
        } else if (Envelope.Operation.UPDATE.equals(type)) {
            delta = active ? 1 : -1;
        } else {
            return; // no other operation types are expected here
        }
        out.collect(Tuple3.of(parkId.toString(), deviceType.toString(), delta));
    }

    /**
     * Maps a (parkId, deviceType, count) tuple into a Redis HSET under the
     * fixed hash key "deviceTypeJob", for use with a {@code RedisSink}.
     */
    static class MyRedisMapper implements RedisMapper<Tuple3<String, String, Integer>> {

        @Override
        public RedisCommandDescription getCommandDescription() {
            // arg1: Redis command/data type; arg2: the outer key the hash lives under.
            return new RedisCommandDescription(RedisCommand.HSET, "deviceTypeJob");
        }

        @Override
        public String getKeyFromData(Tuple3<String, String, Integer> tuple3) {
            // Fixed hash field: every tuple is written to the same field,
            // so each write replaces the previous value.
            return "deviceType";
        }

        // BUG FIX: @Override was missing here; without it a signature typo
        // would silently create an overload instead of implementing the
        // RedisMapper interface method.
        @Override
        public String getValueFromData(Tuple3<String, String, Integer> tuple3) {
            return tuple3.toString();
        }
    }


    static class AsyncHttpFunction extends RichAsyncFunction<Tuple3<String, String,Integer>, R> {
        CloseableHttpAsyncClient httpclient = null;
        // 打开连接
        @Override
        public void open(Configuration parameters) throws Exception {
            RequestConfig requestConfig = RequestConfig.custom()
                    .setSocketTimeout(3000)
                    .setConnectTimeout(3000) //设置HttpClient连接池超时时间
                    .build();
            httpclient = HttpAsyncClients.custom()
                    .setMaxConnTotal(20) //连接池最大连接数
                    .setDefaultRequestConfig(requestConfig)
                    .build();

            httpclient.start();
        }
        // 异步处理函数的主执行方法
        @Override
        public void asyncInvoke(Tuple3<String, String,Integer> line, ResultFuture<R> resultFuture) throws Exception {
            try {
                HttpPost httpPost = new HttpPost("http://192.168.20.130:8000/screen/parkLive/sendMsgToWeb?parkId="+line.f0+"&msgType=deviceType");
                JSONObject param = new JSONObject();
                param.put("parkId",line.f0);
                param.put("deviceType",line.f1);
                param.put("number",line.f2);
                httpPost.setEntity(new StringEntity(param.toJSONString(), "utf-8"));

                Future<HttpResponse> future = httpclient.execute(httpPost, null);
                HttpResponse response = future.get();
                R r;
                if (response.getStatusLine().getStatusCode() == 200) {
                    HttpEntity entity = response.getEntity();
                    String result = EntityUtils.toString(entity);
                    r = JSON.parseObject(result,R.class);
                }else{
                    r = R.fail();
                }
                resultFuture.complete(Collections.singleton(r));
            } catch (Exception e) {
                e.printStackTrace();
                resultFuture.complete(Collections.singleton(null));
            }
        }
        // 关闭连接
        @Override
        public void close() throws Exception {
            httpclient.close();
        }
    }
}
