package cn.texous.demo.dkc.kafka2aws;

import cn.texous.demo.dkc.kafka2aws.context.MySinkTaskContext;
import cn.texous.util.commons.util.system.ExtClasspathLoader;
import com.amazonaws.auth.SystemPropertiesCredentialsProvider;
import com.google.common.base.CharMatcher;
import io.confluent.connect.s3.S3SinkConnectorConfig;
import io.confluent.connect.s3.S3SinkTask;
import io.confluent.connect.s3.format.json.JsonFormat;
import io.confluent.connect.storage.StorageSinkConnectorConfig;
import io.confluent.connect.storage.common.StorageCommonConfig;
import io.confluent.connect.storage.partitioner.PartitionerConfig;
import io.confluent.connect.storage.partitioner.TimeBasedPartitioner;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.log4j.Logger;
import org.apache.spark.SparkConf;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka010.ConsumerStrategies;
import org.apache.spark.streaming.kafka010.KafkaUtils;
import org.apache.spark.streaming.kafka010.LocationStrategies;
import sun.misc.BASE64Decoder;

import java.io.Serializable;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Base64;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;

/**
 * Spark Streaming job that consumes Base64-encoded events from a Kafka topic
 * and uploads them to AWS S3 through the Confluent S3 sink task.
 *
 * @author leven
 * @since 1.0
 */
public class SparkEventsToS3Task implements Serializable {

    private static final long serialVersionUID = 1L;

    private static final Logger logger = Logger.getLogger(SparkEventsToS3Task.class);

    private static final String APP_NAME = "transfer events to s3 App";

    /** Micro-batch length; also reused as the sink's rotate/partition interval. */
    private static final long DURATION_SECOND = 10;
    private static final long DURATION_MILLISECOND = DURATION_SECOND * 1000L;
    // NOTE(review): declared but never used — the partitioner below is configured
    // with "America/Los_Angeles"; confirm which timezone is actually intended.
    private static final String DATE_TIME_ZONE_STRING = "Asia/Shanghai";

    // TODO: load these from external configuration (previously read from
    // CONFIG.getStreaming().getAws()) instead of hard-coded empty placeholders.
    private static final String ACCESS_KEY = "";
    private static final String SECRET_KEY = "";
    private static final String BUCKET_NAME = "";

    private static final String REGION = "";
    private static final String TOPIC = "";
    protected static final TopicPartition TOPIC_PARTITION = new TopicPartition(TOPIC, 0);

    private static final String HOSTS = "192.168.0.68:9092";

    /** Confluent sink task performing the actual S3 uploads; set in initS3SinkTask(). */
    private static S3SinkTask S_3_SINK_TASK = null;

    static {
        // Load extra jars (connector dependencies) dropped into /tmp/jar at runtime.
        List<String> extJarsPath = new ArrayList<>();
        extJarsPath.add("/tmp/jar");
        ExtClasspathLoader.loadClasspath(extJarsPath, new ArrayList<>());
    }

    /**
     * Entry point: builds a local Spark Streaming context, subscribes to the
     * Kafka topic and forwards every micro-batch partition to the S3 sink task.
     *
     * @param args unused
     */
    public static void main(String[] args) {
        logger.info("start to init SparkEventsToS3Task!!!");
        SparkConf conf = new SparkConf();
        conf.setAppName(APP_NAME);
        conf.setMaster("local[*]");
        // Graceful shutdown: let in-flight batches finish before stopping.
        conf.set("spark.streaming.stopGracefullyOnShutdown", Boolean.TRUE.toString());
        conf.set("spark.default.parallelism", "6");

        JavaStreamingContext jssc = new JavaStreamingContext(
                conf, Durations.seconds(DURATION_SECOND));

        Map<String, Object> kafkaParams = new HashMap<>();
        kafkaParams.put("bootstrap.servers", HOSTS);
        kafkaParams.put("key.deserializer", StringDeserializer.class);
        kafkaParams.put("value.deserializer", StringDeserializer.class);
        kafkaParams.put("group.id", "events_to_s3_d");
        // Consume from the beginning when no committed offset exists.
        kafkaParams.put("auto.offset.reset", "earliest");
        kafkaParams.put("enable.auto.commit", Boolean.TRUE.toString());
        // FIX: the consumer property is "auto.commit.interval.ms";
        // "auto.commit.interval" is not a Kafka config key and was ignored.
        kafkaParams.put("auto.commit.interval.ms", "100");

        MySinkTaskContext context = initS3SinkTask();
        Collection<String> topic0 = Collections.singletonList(TOPIC);
        List<Collection<String>> topics = Collections.singletonList(topic0);
        List<JavaDStream<ConsumerRecord<String, String>>> kafkaStreams =
                new ArrayList<>(topics.size());
        for (Collection<String> topic : topics) {
            kafkaStreams.add(KafkaUtils.createDirectStream(
                    jssc, LocationStrategies.PreferConsistent(),
                    ConsumerStrategies.Subscribe(topic, kafkaParams)));
        }
        JavaDStream<ConsumerRecord<String, String>> stream = jssc.union(
                kafkaStreams.get(0), kafkaStreams.subList(1, kafkaStreams.size()));
        // NOTE(review): S_3_SINK_TASK is a driver-side static; sharing it from
        // foreachPartition only works because the master is local[*] (executors
        // run in the driver JVM) — confirm before deploying on a real cluster.
        stream.foreachRDD(rdd -> rdd.foreachPartition(SparkEventsToS3Task::patchTransfer));
        jssc.start();

        try {
            jssc.awaitTermination();
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
        } finally {
            // FIX: close the sink task even when awaitTermination() throws;
            // previously the close call was skipped on any exception.
            closeS3SinkTask(context);
        }
        logger.info("succ to init SparkEventsToS3Task!!!");
    }

    /**
     * Creates and starts the Confluent {@link S3SinkTask} bound to the single
     * topic-partition this job writes.
     *
     * @return the sink-task context holding the partition assignment
     */
    private static MySinkTaskContext initS3SinkTask() {
        logger.info("start to init s3 sink task");
        // FIX: was a raw HashSet; use the generic type.
        Set<TopicPartition> assignment = new HashSet<>();
        assignment.add(TOPIC_PARTITION);
        MySinkTaskContext context = new MySinkTaskContext(assignment);
        S_3_SINK_TASK = new S3SinkTask();
        S_3_SINK_TASK.initialize(context);
        S_3_SINK_TASK.start(createProps(BUCKET_NAME, REGION));
        return context;
    }

    /**
     * Closes the partition writers and stops the sink task, if it was started.
     *
     * @param context context whose assignment lists the partitions to close
     */
    private static void closeS3SinkTask(MySinkTaskContext context) {
        if (S_3_SINK_TASK != null) {
            S_3_SINK_TASK.close(context.assignment());
            S_3_SINK_TASK.stop();
            logger.info("s3 sink task stopped");
        }
    }

    /**
     * Converts one RDD partition's Kafka records into {@link SinkRecord}s and
     * hands them to the S3 sink task. Values are expected to be Base64 text
     * wrapped in double quotes (JSON string encoding) — TODO confirm with the
     * producer side.
     *
     * @param crs records of a single RDD partition; may be null or empty
     */
    private static void patchTransfer(Iterator<ConsumerRecord<String, String>> crs) {
        if (crs == null || !crs.hasNext()) {
            logger.info("crs(SparkEventsToS3Task events) is null or empty!!!");
            return;
        }

        try {
            // FIX: sun.misc.BASE64Decoder is a JDK-internal API removed in
            // Java 9+; java.util.Base64's MIME decoder is the closest
            // replacement (it also tolerates embedded line separators).
            Base64.Decoder decoder = Base64.getMimeDecoder();
            List<SinkRecord> records = new ArrayList<>();
            while (crs.hasNext()) {
                ConsumerRecord<String, String> record = crs.next();
                // Strip the surrounding JSON double quotes before decoding.
                String msg = CharMatcher.is('\"').trimFrom(record.value());
                String decodeMsg = new String(decoder.decode(msg), StandardCharsets.UTF_8);
                SinkRecord sinkRecord = new SinkRecord(TOPIC_PARTITION.topic(),
                        TOPIC_PARTITION.partition(), null, record.key(),
                        null, decodeMsg, record.offset());
                records.add(sinkRecord);
            }
            if (!records.isEmpty()) {
                S_3_SINK_TASK.put(records);
            }
        } catch (Exception e) {
            // Best-effort: log and drop this batch rather than kill the stream.
            logger.error(e.getMessage(), e);
        }
    }

    /**
     * Builds the configuration map for the S3 sink task: storage class,
     * flush/rotate policy, credentials (via JVM system properties), output
     * format and time-based partitioning.
     *
     * @param bucketName target S3 bucket
     * @param region     AWS region of the bucket
     * @return connector properties consumed by {@code S3SinkTask.start}
     */
    private static Map<String, String> createProps(String bucketName, String region) {
        // SystemPropertiesCredentialsProvider reads exactly these two JVM properties.
        System.setProperty("aws.accessKeyId", ACCESS_KEY);
        System.setProperty("aws.secretKey", SECRET_KEY);
        Map<String, String> props = new HashMap<>();
        props.put(StorageCommonConfig.STORAGE_CLASS_CONFIG,
                "io.confluent.connect.s3.storage.S3Storage");
        props.put(StorageCommonConfig.DIRECTORY_DELIM_CONFIG, "/");
        props.put(StorageCommonConfig.FILE_DELIM_CONFIG, "#");

        // Flush after 1M records or rotate once per micro-batch interval,
        // whichever comes first.
        props.put(StorageSinkConnectorConfig.FLUSH_SIZE_CONFIG, "1000000");
        props.put(StorageSinkConnectorConfig.ROTATE_INTERVAL_MS_CONFIG,
                String.valueOf(DURATION_MILLISECOND));

        props.put(S3SinkConnectorConfig.CREDENTIALS_PROVIDER_CLASS_CONFIG,
                SystemPropertiesCredentialsProvider.class.getName());
        props.put(S3SinkConnectorConfig.PART_SIZE_CONFIG,
                String.valueOf(S3SinkConnectorConfig.PART_SIZE_DEFAULT));
        props.put(S3SinkConnectorConfig.REGION_CONFIG, region);
        props.put(S3SinkConnectorConfig.S3_BUCKET_CONFIG, bucketName);
        props.put(S3SinkConnectorConfig.FORMAT_CLASS_CONFIG, JsonFormat.class.getName());

        // Hourly time-based partitioning: year=YYYY/month=MM/day=dd/hour=HH.
        props.put(PartitionerConfig.PARTITIONER_CLASS_CONFIG,
                TimeBasedPartitioner.class.getName());
        props.put(PartitionerConfig.PATH_FORMAT_CONFIG,
                "'year'=YYYY/'month'=MM/'day'=dd/'hour'=HH");
        props.put(PartitionerConfig.LOCALE_CONFIG, "en");
        // NOTE(review): partitions in LA time while DATE_TIME_ZONE_STRING above
        // says Asia/Shanghai — confirm which timezone is intended.
        props.put(PartitionerConfig.TIMEZONE_CONFIG, "America/Los_Angeles");
        props.put(PartitionerConfig.PARTITION_DURATION_MS_CONFIG,
                String.valueOf(DURATION_MILLISECOND));

        return props;
    }

}
