/*
 *  Copyright 2020-2025 the original author or authors.
 *  You cannot use this file unless authorized by the author.
 */

package org.ipig.computing.spark.streaming.kafka.impl;

import com.alibaba.fastjson.JSONObject;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang.StringUtils;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.ipig.computing.commons.pojo.kafka.KafkaRecordInfo;
import org.ipig.computing.commons.pojo.kafka.RecordInfo;
import org.ipig.computing.spark.streaming.kafka.AbstractSparkStreamKafkaExecutor;
import org.ipig.constants.JavaCnst;

import java.io.File;

/**
 * Generic Spark Streaming Kafka executor implementation.
 *
 * <p>Applies the configured log level, sets up a per-class checkpoint
 * directory, and logs each decoded record as it arrives.
 *
 * @author <a href="mailto:comchnts@163.com">chinats</a>
 * @since 1.0
 */
@Slf4j
public class GenericSparkStreamKafkaExecutorImpl extends AbstractSparkStreamKafkaExecutor {

    /** Log level applied when the configuration does not supply one. */
    private static final String DEFAULT_LOG_LEVEL = "WARN";

    /**
     * Processes a single Kafka record whose value has been decoded into a
     * {@link RecordInfo} payload.
     *
     * @param recInfo wrapper carrying the record key and the decoded value
     */
    @Override
    public void doEachRecord(KafkaRecordInfo<String, RecordInfo<JSONObject, JSONObject>> recInfo) {
        // Fix: use the parameterized type instead of the raw RecordInfo, and route
        // output through the SLF4J logger rather than System.out so it honors the
        // log level this class itself configures in init().
        RecordInfo<JSONObject, JSONObject> info = recInfo.getValue();
        log.info("{}", info);
    }

    /**
     * Hook invoked after each RDD batch has been processed.
     *
     * @param rdd the batch of raw consumer records that was just processed
     */
    @Override
    public void postEachRDD(JavaRDD<ConsumerRecord<String, String>> rdd) {
        // NOTE(review): placeholder hook — accumulator values can be read here.
        log.warn("这里可以进行累加器值的获取");
    }

    /**
     * Initializes the streaming context: applies the configured (or default)
     * log level and sets a checkpoint directory namespaced by this class.
     *
     * @param jsc the streaming context being initialized
     */
    @Override
    public void init(JavaStreamingContext jsc) {
        JavaSparkContext javaSparkContext = jsc.sparkContext();

        // Fall back to WARN when no log level is configured.
        String logLevel = getConf().getLogLevel();
        if (StringUtils.isBlank(logLevel)) {
            logLevel = DEFAULT_LOG_LEVEL;
        }
        javaSparkContext.setLogLevel(logLevel);

        // Checkpoint under the configured directory, falling back to the JVM
        // temp dir. The class-name suffix keeps checkpoints of different
        // executor implementations from colliding.
        String baseDir = getConf().getCheckpointDir();
        if (StringUtils.isBlank(baseDir)) {
            baseDir = System.getProperty(JavaCnst.IO_TMP_DIR);
        }
        String checkpointDir =
                baseDir + File.separator + "checkpoint" + File.separator + getClass().getName();
        javaSparkContext.sc().setCheckpointDir(checkpointDir);
    }
}

