package cn.gwm.flink.streaming.task;

import cn.gwm.flink.streaming.constant.BaseFields;
import cn.gwm.flink.streaming.constant.DefaultConstant;
import cn.gwm.flink.streaming.constant.VehicleType;
import cn.gwm.flink.streaming.ods.ModelHandle;
import cn.gwm.flink.streaming.ods.model.StandardFieldConf;
import cn.gwm.flink.streaming.sink.kafka.KafkaSinkProducer;
import cn.gwm.utils.ConfigLoader;
import cn.hutool.db.Db;
import cn.hutool.db.ds.DSFactory;
import cn.hutool.json.JSON;
import cn.hutool.json.JSONObject;
import cn.hutool.json.JSONUtil;
import org.apache.flink.api.common.RuntimeExecutionMode;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.common.state.*;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.util.Collector;
import org.apache.flink.util.OutputTag;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.sql.SQLException;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;

/**
 * ODS task bridging cloud-service vehicle data: consumes raw JSON messages from the
 * cloud Kafka cluster ({@code yun.server} / {@code yun.p01.topic}) and republishes
 * them unchanged to the local "test" topic, keyed by VIN.
 *
 * @author GW00256253
 */
public class OdsYunTestTask {

    private static final Logger LOG = LoggerFactory.getLogger(OdsYunTestTask.class);

    /** Checkpoint interval shared by the checkpoint configuration: 5 minutes. */
    private static final long CHECKPOINT_INTERVAL_MS = 300 * 1000L;

    /**
     * Entry point: initializes configuration, sets up checkpointing, and wires the
     * cloud Kafka source straight into the local Kafka sink.
     *
     * @param args CLI arguments forwarded to {@link ConfigLoader#init}
     * @throws Exception if the Flink job fails to build or execute
     */
    public static void main(String[] args) throws Exception {
        ConfigLoader.init(args);
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        setEnv(env, "OdsYunTestTask");
        env.addSource(getKafkaConsumer()).addSink(getProducer());
        // Name the job explicitly so it is identifiable in the Flink web UI.
        env.execute("OdsYunTestTask");
    }

    /**
     * Debug pipeline (not wired into {@link #main}): keeps messages for two known
     * VINs and logs each big-data status entry, for the code prefixes of interest,
     * whose items carry a value other than the "-" placeholder.
     *
     * @param env execution environment providing the Kafka source
     */
    private static void test(StreamExecutionEnvironment env) {
        SingleOutputStreamOperator<JSONObject> filtered = env.addSource(getKafkaConsumer())
                .map(JSONUtil::parseObj)
                .filter(it -> "LGWFFUA67MB000002".equals(it.getStr(BaseFields.vin))
                        || "LGWFFUA64NB000024".equals(it.getStr(BaseFields.vin)));
        // Anonymous class (not a lambda) so Flink's type extraction keeps the generics.
        filtered.map(new MapFunction<JSONObject, Object>() {
            @Override
            public Object map(JSONObject entries) throws Exception {
                List<JSONObject> beanList = entries.getBeanList(BaseFields.BIG_DATA_STATUS, JSONObject.class);
                if (beanList != null) {
                    for (JSONObject bigDataStatus : beanList) {
                        String codeValue = bigDataStatus.getStr(BaseFields.BIG_DATA_CODE);
                        // Guard: a status without a code would otherwise NPE on startsWith.
                        if (codeValue == null) {
                            continue;
                        }
                        // The scan is identical for every prefix of interest, so the
                        // previously duplicated loops are collapsed into one helper call.
                        if (codeValue.startsWith("2042237")
                                || codeValue.startsWith("2011770")
                                || codeValue.startsWith("2042280")) {
                            logIfHasRealValue(bigDataStatus);
                        }
                    }
                }
                return null;
            }
        });
    }

    /**
     * Logs the status object once if any of its items carries a value other than
     * the "-" placeholder.
     *
     * @param bigDataStatus one element of the BIG_DATA_STATUS array
     */
    private static void logIfHasRealValue(JSONObject bigDataStatus) {
        List<JSONObject> itemList = bigDataStatus.getBeanList(BaseFields.BIG_DATA_ITEMS, JSONObject.class);
        if (itemList == null) {
            return;
        }
        for (JSONObject item : itemList) {
            String value = item.getStr(BaseFields.BIG_DATA_ITEMS_VALUE);
            if (!"-".equals(value)) {
                LOG.info("{}", bigDataStatus);
                break;
            }
        }
    }

    /**
     * Builds the Kafka producer writing to the local "test" topic, partitioned by VIN.
     *
     * @return configured Flink Kafka producer
     */
    @SuppressWarnings("rawtypes") // producerBuilder returns a raw FlinkKafkaProducer
    private static FlinkKafkaProducer getProducer() {
        Properties prodProps = new Properties();
        prodProps.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, ConfigLoader.get("bootstrap.servers"));
        // Wait for all in-sync replicas so no record is lost on a broker failure.
        prodProps.setProperty(ProducerConfig.ACKS_CONFIG, "all");
        // Batch up to 1 s worth of records before sending.
        prodProps.setProperty(ProducerConfig.LINGER_MS_CONFIG, "1000");
        // Transaction timeout (5 min); must not exceed the broker's transaction.max.timeout.ms.
        prodProps.setProperty(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, "300000");
        // Allow requests up to 5 MiB.
        prodProps.setProperty(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, "5242880");
        return KafkaSinkProducer.producerBuilder("test", prodProps, BaseFields.vin);
    }

    /**
     * Builds the cloud-cluster Kafka consumer for topic {@code yun.p01.topic},
     * deserializing records as plain strings.
     *
     * @return configured Flink Kafka consumer producing raw JSON strings
     */
    private static FlinkKafkaConsumer<String> getKafkaConsumer() {
        Properties props = new Properties();
        props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, ConfigLoader.get("yun.server"));
        // Group id is suffixed with the active profile so environments do not share offsets.
        // NOTE(review): if BOOT_PROFILES_ACTIVE is unset, the suffix becomes "null" — confirm intended.
        props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "consumer-" + ConfigLoader.get("yun.p01.topic")
                + System.getenv().get(DefaultConstant.BOOT_PROFILES_ACTIVE));
        // Auto-commit consumed offsets every second.
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        // Consumer is considered dead after 30 s without a heartbeat.
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000");
        // Start from the oldest record when no committed offset exists.
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");
        final FlinkKafkaConsumer<String> kafkaConsumer = new FlinkKafkaConsumer<>(
                ConfigLoader.get("yun.p01.topic"),
                new SimpleStringSchema(),
                props
        );
        // Always begin at the earliest offset, ignoring committed group offsets.
        kafkaConsumer.setStartFromEarliest();
        return kafkaConsumer;
    }

    /**
     * Applies the shared checkpoint, state-backend, and restart configuration.
     *
     * @param env      execution environment to configure
     * @param taskName used as the checkpoint-storage subdirectory on HDFS
     */
    private static void setEnv(StreamExecutionEnvironment env, String taskName) {
        // Checkpoint every 5 minutes with exactly-once semantics. (The original also
        // called the redundant setCheckpointInterval with the same value.)
        env.enableCheckpointing(CHECKPOINT_INTERVAL_MS, CheckpointingMode.EXACTLY_ONCE);
        env.setRuntimeMode(RuntimeExecutionMode.STREAMING);
        CheckpointConfig checkpointConfig = env.getCheckpointConfig();
        // Abort a checkpoint that has not completed within 10 minutes.
        checkpointConfig.setCheckpointTimeout(600 * 1000L);
        // Never run overlapping checkpoints, and leave at least 5 s between them.
        checkpointConfig.setMaxConcurrentCheckpoints(1);
        checkpointConfig.setMinPauseBetweenCheckpoints(5000L);
        // Keep checkpoint data when the job is cancelled so it can be restored later.
        checkpointConfig.enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        // Tolerate up to 3 checkpoint failures before failing the job. The original's
        // deprecated setFailOnCheckpointingErrors(true) was immediately overridden by
        // this call, so dropping it preserves the effective behavior.
        checkpointConfig.setTolerableCheckpointFailureNumber(3);
        // RocksDB state backend: state may exceed heap, supports incremental checkpoints.
        env.setStateBackend(new EmbeddedRocksDBStateBackend());
        // Unaligned checkpoints keep checkpointing responsive under backpressure.
        checkpointConfig.enableUnalignedCheckpoints();
        checkpointConfig.setCheckpointStorage(ConfigLoader.get("hdfsUri") + "/flink/checkpoints/" + taskName);
        // Fail the job only if more than 10 restarts occur within 15 minutes,
        // with a 10 s delay between restart attempts.
        env.setRestartStrategy(RestartStrategies.failureRateRestart(10, Time.minutes(15), Time.seconds(10)));
    }

}