package com.sjk.flink;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.sjk.flink.exeception.BaseException;
import com.sjk.flink.exeception.ErrorCode;
import com.sjk.flink.functions.JsonValidationProcessFunction;
import com.sjk.flink.kafka.*;
import com.sjk.flink.pojo.OutPutPojo;
import com.sjk.flink.util.ObjectMapperUtil;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.core.fs.FSDataInputStream;
import org.apache.flink.core.fs.FileSystem;
import org.apache.flink.core.fs.Path;
import org.apache.flink.runtime.fs.hdfs.HadoopFsFactory;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.util.OutputTag;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.BufferedReader;
import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.URI;
import java.nio.charset.StandardCharsets;
import java.util.*;

/**
 * Flink streaming job: consumes the ODS Kafka topics for one project code
 * ("zxdm"), validates every record against cached JSON schemas and routes the
 * results via side outputs to the STD (valid), QST (schema violation) and ECT
 * (parse/runtime error) topics, all with EXACTLY_ONCE Kafka semantics.
 */
public class StreamProcess {

    private static final Logger log = LoggerFactory.getLogger(StreamProcess.class);

    /** Calibration and quality-check ("jzzl") definition files, keyed by table name. **/
    private final static Map<String, String> JZZL_CONFIGS = new HashMap<String, String>();

    /** JSON schema definition files, keyed by topic name (plus the shared "ODS" base schema). **/
    private final static Map<String, String> SCHEMA_CONFIGS = new HashMap<String, String>();

    /** Job properties loaded from /stream.properties; public so other components can read them. **/
    public static Properties properties = new Properties();

    /**
     * Job entry point.
     *
     * @param args requires "--zxdm &lt;code&gt;"; optional "--consumergroup",
     *             "--zlfx" (quality analysis, default true) and
     *             "--test" (test mode, default true — output topics get a prefix
     *             and consumer offsets are not auto-committed)
     * @throws Exception if mandatory configuration is missing or the job fails
     */
    public static void main(String[] args) throws Exception {

        // 1. Read command-line parameters.
        ParameterTool parameterTool = ParameterTool.fromArgs(args);
        // Project code (mandatory).
        String zxdm = parameterTool.get("zxdm");
        if (zxdm == null || zxdm.isEmpty()) {
            throw new BaseException(ErrorCode.CODE_E00005);
        }
        // Kafka consumer group, defaulting to "<zxdm>_consumergroup".
        String consumerGroup = parameterTool.get("consumergroup", zxdm + "_consumergroup");

        // 2. Load job properties from the classpath.
        ParameterTool parameter = ParameterTool.fromPropertiesFile(
                StreamProcess.class.getResourceAsStream("/stream.properties"));
        properties = parameter.getProperties();

        // Source Kafka topics for this project code.
        // BUGFIX: String.split never returns null, so the old null/length check on
        // the array was dead code, while a missing property (null soucreTopic)
        // crashed with an NPE at split(). Validate the raw value instead.
        String soucreTopic = parameter.get("kafka.topic.source." + zxdm);
        if (soucreTopic == null || soucreTopic.trim().isEmpty()) {
            throw new BaseException(ErrorCode.CODE_E00006);
        }
        String[] soucretopics = soucreTopic.split(",");

        // Cache schema and calibration/quality definitions for every source topic.
        loadConfig(zxdm, soucretopics);

        // Quality-analysis flag (default true when absent/empty).
        boolean zlfxFlg = false;
        String zlfx = parameterTool.get("zlfx");
        if (zlfx == null || zlfx.isEmpty() || zlfx.equalsIgnoreCase("true")) {
            zlfxFlg = true;
        }

        // Test mode (default true when absent/empty): output topics get a prefix.
        String test = parameterTool.get("test");
        boolean isTest = test == null || test.isEmpty() || test.equalsIgnoreCase("true");
        String prefix = isTest ? parameter.get("test.topic.prefix", "TEST_") : "";

        // Quality-issue and parse-error target topics (prefixed in test mode).
        String qstTopic = prefix + zxdm + "_" + parameter.get("qst.topic", "QST_TY");
        String ectTopic = prefix + zxdm + "_" + parameter.get("ect.topic", "ECT_TY");

        // Operator parallelism for this project code.
        int parallelNum = Integer.parseInt(parameter.get("kafka.parallels.num." + zxdm));

        // 3. Read the cluster configuration JSON from HDFS and parse it.
        ObjectMapper objectMapper = ObjectMapperUtil.getObjectMapper();
        JsonNode configJson = objectMapper.readTree(readHdfsConfig());

        // 4. Build the execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Optional compression for all checkpoints and savepoints (Flink default: off).
        env.getConfig().setUseSnapshotCompression(true);
        env.setParallelism(parallelNum);
        configureCheckpointing(env);

        // Fixed-delay restart: three attempts, 5 s apart, before the job exits.
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, Time.seconds(5)));

        // Make the CLI parameters available to all operators.
        env.getConfig().setGlobalJobParameters(parameterTool);

        // 5. Kafka client configuration.
        // BUGFIX: the local variable used to be named "properties", silently
        // shadowing the public static field of the same name; renamed to kafkaProps.
        Properties kafkaProps = buildConsumerProperties(configJson, consumerGroup, isTest);

        log.info("6.消费ODS-TOPIC");
        // 6. Consume the ODS topics.
        FlinkKafkaConsumer<ConsumerRecord<String, String>> consumer =
                new FlinkKafkaConsumer<ConsumerRecord<String, String>>(
                        Arrays.asList(soucretopics), new MyKafkaDeserializationSchema(), kafkaProps);
        // Default strategy: resume from the group's committed offsets; a brand-new
        // consumer group only reads the latest data.
        consumer.setStartFromGroupOffsets();

        // 7. Side-output tags for the three validation outcomes.
        log.info("7.定义侧输出流标签");
        OutputTag<OutPutPojo> invalidJsonTag = new OutputTag<OutPutPojo>("invalid-json") {
        };
        OutputTag<OutPutPojo> validJsonTag = new OutputTag<OutPutPojo>("valid-json") {
        };
        OutputTag<OutPutPojo> commonInvalidJsonTag = new OutputTag<OutPutPojo>("common-Invalid-json") {
        };

        // 8. Split the stream by validation result.
        log.info("8.分流");
        SingleOutputStreamOperator<ConsumerRecord<String, String>> processStream = env
                .addSource(consumer)
                .process(new JsonValidationProcessFunction(zlfxFlg, SCHEMA_CONFIGS, JZZL_CONFIGS,
                        validJsonTag, invalidJsonTag, commonInvalidJsonTag));

        // Schema-valid records.
        DataStream<OutPutPojo> validJsonStream = processStream.getSideOutput(validJsonTag);
        // Schema violations.
        DataStream<OutPutPojo> invalidJsonStream = processStream.getSideOutput(invalidJsonTag);
        // Data-format or runtime errors.
        DataStream<OutPutPojo> commonInvalidJsonStream = processStream.getSideOutput(commonInvalidJsonTag);

        // Producer settings: retry across partition-leader changes instead of
        // restarting the job, keep ordering (one in-flight request) and enable
        // idempotence to back the EXACTLY_ONCE semantic.
        kafkaProps.put(ProducerConfig.RETRIES_CONFIG, 5);
        kafkaProps.put(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, 1000 * 60 * 5);
        kafkaProps.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, 1);
        kafkaProps.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);
        // Allow requests up to 20 MiB.
        kafkaProps.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, 20971520);

        // 9. Fan the three streams out to their target topics.
        // NOTE(review): random UUIDs make every submission generate fresh operator
        // uids, which defeats savepoint-based state restore — confirm whether
        // stable uids are wanted before changing, as that would break existing
        // savepoint compatibility.
        log.info("成功发送STD");
        FlinkKafkaProducer<OutPutPojo> stdProducer = new FlinkKafkaProducer<>("",
                new CustomKafkaSerializationSchema(prefix + "STD_"), kafkaProps,
                FlinkKafkaProducer.Semantic.EXACTLY_ONCE);
        validJsonStream.addSink(stdProducer).name("标准_" + zxdm)
                .uid(zxdm + "_STD_" + UUID.randomUUID().toString());

        log.info("发送给qstty");
        FlinkKafkaProducer<OutPutPojo> qsttyroducer = new FlinkKafkaProducer<>("",
                new QsttyKafkaSerializationSchema(qstTopic), kafkaProps,
                FlinkKafkaProducer.Semantic.EXACTLY_ONCE);
        invalidJsonStream.addSink(qsttyroducer).name("质量分析_" + zxdm)
                .uid(zxdm + "_QST_TY_" + UUID.randomUUID().toString());

        log.info("发送给ectty");
        kafkaProps.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 1000);
        kafkaProps.setProperty(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, 1000 * 60 * 5 + "");
        FlinkKafkaProducer<OutPutPojo> ecttyroducer = new FlinkKafkaProducer<>("",
                new EcttyKafkaSerializationSchema(ectTopic), kafkaProps,
                FlinkKafkaProducer.Semantic.EXACTLY_ONCE);
        commonInvalidJsonStream.addSink(ecttyroducer).name("解析错误_" + zxdm)
                .uid(zxdm + "_ECT_TY_" + UUID.randomUUID().toString());

        env.execute(zxdm + "_JsonVaildateProcess");
    }

    /**
     * Reads the cluster configuration file from HDFS as UTF-8 text.
     *
     * BUGFIX: the original loop appended the whole 1024-byte buffer on every
     * read regardless of how many bytes were actually returned, padding the JSON
     * with NUL characters (and decoding chunk-by-chunk could also split
     * multi-byte UTF-8 sequences). Bytes are now accumulated by the exact read
     * count and decoded once. The stream is also closed via try-with-resources.
     *
     * @return the trimmed content of /tmp/flink-lk/config.json
     * @throws Exception if the file system cannot be created or the file read
     */
    private static String readHdfsConfig() throws Exception {
        HadoopFsFactory hadoopFsFactory = new HadoopFsFactory();
        // Huawei MRS HDFS address.
        FileSystem fileSystem = hadoopFsFactory.create(new URI("hdfs://hacluster/"));
        ByteArrayOutputStream content = new ByteArrayOutputStream();
        // Absolute HDFS path of the configuration file (JSON for easy parsing).
        try (FSDataInputStream in = fileSystem.open(new Path("/tmp/flink-lk/config.json"))) {
            byte[] buffer = new byte[1024];
            int read;
            while ((read = in.read(buffer)) != -1) {
                content.write(buffer, 0, read);
            }
        }
        return content.toString(StandardCharsets.UTF_8.name()).trim();
    }

    /**
     * Applies the job's checkpointing policy.
     *
     * @param env the environment to configure
     */
    private static void configureCheckpointing(StreamExecutionEnvironment env) {
        // Trigger a checkpoint every 5 s with exactly-once guarantees.
        env.enableCheckpointing(5000, CheckpointingMode.EXACTLY_ONCE);
        // Leave at least 3 s between the end of one checkpoint and the start of the next.
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3000);
        // Discard checkpoints that take longer than 30 s.
        env.getCheckpointConfig().setCheckpointTimeout(30000);
        // Never run two checkpoints concurrently.
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);
        // Retain the last successful checkpoint when the job is cancelled so the
        // job can later be restored from it instead of losing accumulated state.
        env.getCheckpointConfig().enableExternalizedCheckpoints(
                CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        // Tolerate a single failed checkpoint before failing the job (default 0).
        env.getCheckpointConfig().setTolerableCheckpointFailureNumber(1);
    }

    /**
     * Builds the Kafka consumer configuration.
     *
     * @param configJson    cluster configuration JSON (provides "bootstrap.servers")
     * @param consumerGroup consumer group id
     * @param isTest        in test mode offsets are NOT auto-committed so runs are repeatable
     * @return consumer {@link Properties}; producer settings are added later in main
     */
    private static Properties buildConsumerProperties(JsonNode configJson,
                                                      String consumerGroup,
                                                      boolean isTest) {
        Properties props = new Properties();
        // In Kerberos security mode, Huawei MRS Kafka is reached on port 21007.
        props.put("bootstrap.servers", configJson.get("bootstrap.servers").textValue());
        // Principal-based Kerberos authentication.
        props.put("security.protocol", "SASL_PLAINTEXT");
        props.put("sasl.kerberos.service.name", "kafka");
        props.put("auto.offset.reset", "earliest");
        props.put("group.id", consumerGroup);
        // Max records per poll (Kafka default: 500).
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 2000);
        // Auto-commit interval when enable.auto.commit is true (default 5000 ms).
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, 1000);
        // Allow up to 10 minutes between polls before the consumer is kicked out.
        props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 600000);
        // Auto-commit offsets only outside of test mode.
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, !isTest);
        return props;
    }

    /**
     * Loads and caches the schema and calibration/quality definition files for
     * the given project code and source topics.
     *
     * @param zxdm         project code used to locate the definition directories
     * @param soucretopics source topic names (expected to contain "ODS_")
     */
    private static void loadConfig(String zxdm, String[] soucretopics) {
        // Base validation schema shared by all topics.
        SCHEMA_CONFIGS.put("ODS", getJOSNStr("/schema/ODS.json"));
        for (String str : soucretopics) {
            // Table name = topic name with everything up to and including "ODS_"
            // stripped off (falls back to substring(3) if "ODS_" is absent —
            // preserved from the original behavior).
            String tablename = str.substring(str.indexOf("ODS_") + 4).trim();
            // Calibration & quality definition per table.
            JZZL_CONFIGS.put(tablename, getJOSNStr("/jzzl/" + zxdm + "/" + tablename + ".json"));
            // Validation schema per topic.
            SCHEMA_CONFIGS.put(str, getJOSNStr("/schema/" + zxdm + "/" + str + ".json"));
        }
    }

    /**
     * Reads a classpath resource into a single string. Line breaks are dropped,
     * matching the original implementation's line-by-line concatenation.
     *
     * BUGFIX: resources are now closed via try-with-resources, a missing
     * resource no longer surfaces as an NPE, the charset is pinned to UTF-8
     * instead of the platform default, and failures are logged through SLF4J
     * rather than printStackTrace().
     *
     * @param path absolute classpath resource path, e.g. "/schema/ODS.json"
     * @return the concatenated file content
     * @throws BaseException with CODE_E00000 when the resource is missing or unreadable
     */
    private static String getJOSNStr(String path) {
        StringBuilder json = new StringBuilder();
        try (InputStream inputStream = StreamProcess.class.getResourceAsStream(path)) {
            if (inputStream == null) {
                // Missing schema file: fail with the dedicated error code.
                throw new BaseException(ErrorCode.CODE_E00000);
            }
            try (BufferedReader bufferedReader =
                         new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8))) {
                String line;
                while ((line = bufferedReader.readLine()) != null) {
                    json.append(line);
                }
            }
        } catch (BaseException e) {
            throw e;
        } catch (Exception e) {
            log.error("Failed to read schema file: {}", path, e);
            throw new BaseException(ErrorCode.CODE_E00000);
        }
        return json.toString();
    }

}