package cn.texous.demo.dsj.stream;

import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;
import com.google.common.base.CharMatcher;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.log4j.Logger;
import org.apache.parquet.example.data.Group;
import org.apache.parquet.example.data.simple.SimpleGroupFactory;
import org.apache.parquet.hadoop.ParquetFileWriter;
import org.apache.parquet.hadoop.ParquetWriter;
import org.apache.parquet.hadoop.example.ExampleParquetWriter;
import org.apache.parquet.hadoop.example.GroupWriteSupport;
import org.apache.parquet.hadoop.metadata.CompressionCodecName;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.OriginalType;
import org.apache.parquet.schema.PrimitiveType;
import org.apache.parquet.schema.Types;
import org.apache.spark.SparkConf;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka010.ConsumerStrategies;
import org.apache.spark.streaming.kafka010.KafkaUtils;
import org.apache.spark.streaming.kafka010.LocationStrategies;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import sun.misc.BASE64Decoder;

import java.io.File;
import java.io.IOException;
import java.io.Serializable;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Base64;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

/**
 * Spark Streaming job that consumes base64-encoded event messages from the
 * {@code track_events} Kafka topic, batches each RDD partition into a
 * time-partitioned parquet file under a local staging directory, and uploads
 * completed files to S3 under a {@code year=/month=/day=/hour=} key prefix.
 */
public class SparkEventsToS3Task implements Serializable {

    private static final long serialVersionUID = 1L;

    private static final Logger logger = Logger.getLogger(SparkEventsToS3Task.class);

    // Configuration should be loaded from a config file (see demo-mq-service
    // ConfigurationFactory). NOTE(review): this map is never populated, so the
    // CONFIG.get(...).toString() calls below will NPE until real loading is wired in.
    private static final Map<String, Object> CONFIG = new HashMap<>();
    private static final String APP_NAME = "transfer events to s3 App";
    // Micro-batch interval of the streaming context; also the time granularity of
    // the partitioned parquet files.
    private static final long DURATION_SECOND = 10;
    private static final long DURATION_MILLISECOND = DURATION_SECOND * 1000L;
    private static final String DATE_TIME_ZONE_STRING = "Asia/Shanghai";
    // Local staging directory for parquet files before they are uploaded to S3.
    private static final String TMP_FILE_PATH = "/tmp/parquet/";
    // Partition key (duration-aligned timestamp, as string) -> writer currently
    // producing a file for that key. A staged file is only uploaded once its key
    // is no longer present here (i.e. its writer has been closed).
    private static final ConcurrentHashMap<String, ParquetWriter<Group>>
            PARQUET_WRITER_CONCURRENT_HASH_MAP = new ConcurrentHashMap<>();
    private static final AmazonS3 S3;
    // Parquet schema of one event record: every field is a required UTF-8 string.
    private static final MessageType EVENTS_MESSAGE_TYPE;

    static {
        AWSCredentials credentials = new BasicAWSCredentials(
                CONFIG.get("AccessKeyId").toString(), CONFIG.get("SecretAccessKey").toString());
        S3 = new AmazonS3Client(credentials);
        EVENTS_MESSAGE_TYPE =
                Types.buildMessage()
                        .required(PrimitiveType.PrimitiveTypeName.BINARY)
                        .as(OriginalType.UTF8).named("event_time")
                        .required(PrimitiveType.PrimitiveTypeName.BINARY)
                        .as(OriginalType.UTF8).named("create_id")
                        .required(PrimitiveType.PrimitiveTypeName.BINARY)
                        .as(OriginalType.UTF8).named("camp")
                        .required(PrimitiveType.PrimitiveTypeName.BINARY)
                        .as(OriginalType.UTF8).named("app_id")
                        .required(PrimitiveType.PrimitiveTypeName.BINARY)
                        .as(OriginalType.UTF8).named("ad_set_id")
                        .required(PrimitiveType.PrimitiveTypeName.BINARY)
                        .as(OriginalType.UTF8).named("ch_id")
                        .required(PrimitiveType.PrimitiveTypeName.BINARY)
                        .as(OriginalType.UTF8).named("user_id")
                        .required(PrimitiveType.PrimitiveTypeName.BINARY)
                        .as(OriginalType.UTF8).named("offer_id")
                        .required(PrimitiveType.PrimitiveTypeName.BINARY)
                        .as(OriginalType.UTF8).named("creative_id")
                        .required(PrimitiveType.PrimitiveTypeName.BINARY)
                        .as(OriginalType.UTF8).named("event_type")
                        .required(PrimitiveType.PrimitiveTypeName.BINARY)
                        .as(OriginalType.UTF8).named("geo")
                        .required(PrimitiveType.PrimitiveTypeName.BINARY)
                        .as(OriginalType.UTF8).named("device")
                        .required(PrimitiveType.PrimitiveTypeName.BINARY)
                        .as(OriginalType.UTF8).named("os_version")
                        .required(PrimitiveType.PrimitiveTypeName.BINARY)
                        .as(OriginalType.UTF8).named("device_ip")
                        .required(PrimitiveType.PrimitiveTypeName.BINARY)
                        .as(OriginalType.UTF8).named("ua")
                        .required(PrimitiveType.PrimitiveTypeName.BINARY)
                        .as(OriginalType.UTF8).named("sdkImpId")
                        .named("trigger");
    }

    /**
     * Entry point: starts a local Spark Streaming job that subscribes to the
     * {@code track_events} topic and hands every RDD partition to
     * {@link #patchTransfer(Iterator)}.
     *
     * @param args unused
     */
    public static void main(String[] args) {
        logger.info("start to init SparkEventsToS3Task!!!");
        SparkConf conf = new SparkConf();
        conf.setAppName(APP_NAME);
        conf.setMaster("local[*]");
        // Graceful shutdown: let in-flight batches finish before stopping.
        conf.set("spark.streaming.stopGracefullyOnShutdown", Boolean.TRUE.toString());
        conf.set("spark.default.parallelism", "6");
        String jaasTemplate = "org.apache.kafka.common.security.plain.PlainLoginModule"
                + " required username=\"%s\" password=\"%s\";";
        String jaasCfg = String.format(jaasTemplate, CONFIG.get("user"), CONFIG.get("password"));
        JavaStreamingContext jssc = new JavaStreamingContext(
                conf, Durations.seconds(DURATION_SECOND));
        // Connection settings matching the pipelines-common configuration.
        Map<String, Object> kafkaParams = new HashMap<>();
        // FIX: String.join(hosts) used the host list as the *delimiter* over zero
        // elements and always produced "", leaving bootstrap.servers empty.
        kafkaParams.put("bootstrap.servers", CONFIG.get("hosts").toString());
        kafkaParams.put("key.deserializer", StringDeserializer.class);
        kafkaParams.put("value.deserializer", StringDeserializer.class);
        // Consume from the earliest offset for a brand-new consumer group.
        kafkaParams.put("group.id", "events_to_s3");
        kafkaParams.put("auto.offset.reset", "earliest");
        kafkaParams.put("enable.auto.commit", Boolean.TRUE.toString());
        // FIX: the consumer property is "auto.commit.interval.ms";
        // "auto.commit.interval" is not a Kafka setting and was silently ignored.
        kafkaParams.put("auto.commit.interval.ms", "100");

        kafkaParams.put("security.protocol", "SASL_SSL");
        kafkaParams.put("sasl.mechanism", "PLAIN");
        kafkaParams.put("sasl.jaas.config", jaasCfg);
        kafkaParams.put("ssl.endpoint.identification.algorithm", "https");

        Collection<String> topic0 = Collections.singletonList("track_events");
        List<Collection<String>> topics = Collections.singletonList(topic0);
        List<JavaDStream<ConsumerRecord<String, String>>> kafkaStreams
                = new ArrayList<>(topics.size());
        for (Collection<String> topic : topics) {
            kafkaStreams.add(KafkaUtils.createDirectStream(
                    jssc, LocationStrategies.PreferConsistent(),
                    ConsumerStrategies.Subscribe(topic, kafkaParams)));
        }
        JavaDStream<ConsumerRecord<String, String>> stream = jssc.union(kafkaStreams.get(0),
                kafkaStreams.subList(1, kafkaStreams.size()));
        stream.foreachRDD(rdd -> rdd.foreachPartition(SparkEventsToS3Task::patchTransfer));
        jssc.start();

        try {
            jssc.awaitTermination();
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
        }
        logger.info("succ to init SparkEventsToS3Task!!!");
    }

    /**
     * Drains one RDD partition: base64-decodes each record value, converts it to a
     * parquet {@link Group} and appends it to a staging file keyed by the current
     * duration-aligned timestamp. When the partition is fully written, the file's
     * writer is closed, its key is released, and any completed staging files are
     * uploaded to S3 and deleted.
     *
     * @param crs records of the partition; a null/empty iterator is a no-op
     */
    private static void patchTransfer(Iterator<ConsumerRecord<String, String>> crs) {
        if (crs == null || !crs.hasNext()) {
            logger.info("crs(SparkEventsToS3Task events) is null or empty!!!");
            return;
        }
        ParquetWriter<Group> writer = null;
        String key = null;
        try {
            long current = System.currentTimeMillis();
            key = String.valueOf(getKey(current));
            String fileName = getFileName(key);

            // NOTE(review): two partitions processed in the same duration window share
            // the same key, so the second put() overwrites the first registration and
            // remove() below may release the key while another writer is still open.
            // The random filename suffix avoids file clashes but not this map race.
            writer = generatorParquetWriter(fileName);
            PARQUET_WRITER_CONCURRENT_HASH_MAP.put(key, writer);

            // FIX: sun.misc.BASE64Decoder is a JDK-internal API removed in Java 9+.
            // The MIME decoder is the drop-in replacement for decodeBuffer (both
            // tolerate line breaks inside the encoded payload).
            Base64.Decoder decoder = Base64.getMimeDecoder();
            while (crs.hasNext()) {
                ConsumerRecord<String, String> record = crs.next();
                // Strip surrounding double quotes left over from JSON serialization.
                String msg = CharMatcher.is('\"').trimFrom(record.value());
                String decodeMsg = new String(decoder.decode(msg), StandardCharsets.UTF_8);
                Group group = msg2Group(decodeMsg, EVENTS_MESSAGE_TYPE);
                // FIX: msg2Group returns null on conversion failure; skip the record
                // instead of letting writer.write(null) abort the whole partition.
                if (group != null) {
                    writer.write(group);
                }
            }
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
        } finally {
            if (writer != null) {
                try {
                    writer.close();
                    // Release the key only after a successful close so a truncated
                    // file is never considered complete and uploaded.
                    PARQUET_WRITER_CONCURRENT_HASH_MAP.remove(key);
                    listFilesAndOperatorAfterDelete(SparkEventsToS3Task::uploadFileToS3);
                } catch (IOException e) {
                    // FIX: log through log4j instead of printStackTrace().
                    logger.error(e.getMessage(), e);
                }
            }
        }
    }

    /**
     * Creates a parquet writer for a new staging file under {@link #TMP_FILE_PATH}.
     *
     * @param fileName bare file name (no directory part)
     * @return an open writer bound to {@link #EVENTS_MESSAGE_TYPE}
     * @throws IOException if the file cannot be created
     */
    private static ParquetWriter<Group> generatorParquetWriter(String fileName) throws IOException {
        final String pathName = TMP_FILE_PATH + fileName;
        Configuration configuration = new Configuration();
        // The schema travels to the writer via the Hadoop configuration.
        GroupWriteSupport.setSchema(EVENTS_MESSAGE_TYPE, configuration);
        Path path = new Path(pathName);

        // CREATE mode fails fast if the file already exists rather than corrupting it.
        return ExampleParquetWriter.builder(path)
                .withWriteMode(ParquetFileWriter.Mode.CREATE)
                .withCompressionCodec(CompressionCodecName.UNCOMPRESSED)
                .withConf(configuration)
                .build();
    }

    /**
     * Applies {@code operation} to every regular file in the staging directory and
     * deletes each file for which the operation returns {@code true}.
     *
     * @param operation callback returning whether the file may be deleted
     */
    private static void listFilesAndOperatorAfterDelete(Function<File, Boolean> operation) {
        File fileDir = new File(TMP_FILE_PATH);
        if (!fileDir.isDirectory()) {
            return;
        }
        // listFiles() returns null on I/O error, not an empty array.
        File[] files = fileDir.listFiles();
        if (files == null) {
            return;
        }
        for (File file : files) {
            // Sub-directories are intentionally left alone.
            if (file.isFile()) {
                Boolean delete = operation.apply(file);
                if (Boolean.TRUE.equals(delete) && !file.delete()) {
                    // FIX: surface a failed delete instead of dropping the result.
                    logger.warn("failed to delete staged file " + file.getAbsolutePath());
                }
            }
        }
    }

    /**
     * Uploads one staged parquet file to S3 if — and only if — its writer is no
     * longer registered, i.e. the file is complete.
     *
     * @param file candidate staging file
     * @return {@code true} when the file may be deleted (uploaded or skipped),
     *         {@code false} if the upload attempt failed
     */
    private static boolean uploadFileToS3(File file) {
        boolean success = true;
        try {
            if (file.length() > 0 && file.getName().endsWith(".parquet")) {
                String filename = file.getName();
                // File names look like "events-<key>-<rund>.parquet"; sfn[1] is the key.
                String[] sfn = filename.split("-");
                if (sfn.length > 1) {
                    String key = sfn[1];
                    // Still present means a writer is still appending — skip for now.
                    if (!PARQUET_WRITER_CONCURRENT_HASH_MAP.containsKey(key)) {
                        long timeMillis = getTimeMillis(key);
                        String filePrefix = getPartitionPrefix(timeMillis);
                        uploadFile(CONFIG.get("bucketName").toString(),
                                file, filePrefix + filename);
                    }
                }
            }
        } catch (Exception e) {
            // FIX: log through log4j instead of printStackTrace().
            logger.error(e.getMessage(), e);
            success = false;
        }
        return success;
    }

    /**
     * Builds the staging file name for a partition key.
     *
     * <p>NOTE(review): the {@code time % 100} suffix gives only 100 distinct values,
     * so concurrent partitions in the same window can still collide — consider a
     * UUID or task/partition id instead.
     *
     * @param key duration-aligned timestamp string
     */
    private static String getFileName(String key) {
        long time = System.currentTimeMillis();
        long rund = time % 100;
        return "events-" + key + "-" + rund + ".parquet";
    }

    /**
     * Returns the partition key for a timestamp: the timestamp truncated to the
     * configured duration window in the configured time zone.
     */
    private static long getKey(long timeMillis) {
        return durationTimestamp(DURATION_MILLISECOND, DATE_TIME_ZONE_STRING, timeMillis);
    }

    /**
     * Parses a partition key string back into epoch milliseconds.
     *
     * @throws NumberFormatException if {@code key} is not a valid long
     */
    private static long getTimeMillis(String key) {
        // FIX: parseLong avoids the needless boxing of Long.valueOf.
        return Long.parseLong(key);
    }

    /**
     * Formats the duration-aligned timestamp with the given Joda pattern in the
     * given time zone.
     *
     * @param partitionDurationMs window size in milliseconds
     * @param pathFormat          Joda-Time pattern, e.g. {@code 'year='yyyy/...}
     * @param timeZoneString      time-zone id, e.g. {@code Asia/Shanghai}
     * @param timestamp           epoch milliseconds to align and format
     */
    private static String encodeTimestamp(long partitionDurationMs,
                                          String pathFormat,
                                          String timeZoneString,
                                          long timestamp) {
        DateTimeZone timeZone = DateTimeZone.forID(timeZoneString);
        DateTimeFormatter formatter = DateTimeFormat.forPattern(pathFormat).withZone(timeZone);
        DateTime partition = new DateTime(getPartition(partitionDurationMs, timestamp, timeZone));
        return partition.toString(formatter);
    }

    /**
     * Returns the epoch milliseconds of {@code timestamp} aligned down to the
     * duration window boundary in the given time zone.
     */
    private static long durationTimestamp(long partitionDurationMs,
                                          String timeZoneString,
                                          long timestamp) {
        DateTimeZone timeZone = DateTimeZone.forID(timeZoneString);
        DateTime partition = new DateTime(getPartition(partitionDurationMs, timestamp, timeZone));
        return partition.getMillis();
    }

    /**
     * Truncates {@code timestamp} to a multiple of {@code timeGranularityMs},
     * computed in local wall-clock time so window boundaries line up with the
     * local calendar, then converts back to UTC milliseconds.
     */
    private static long getPartition(long timeGranularityMs,
                                     long timestamp,
                                     DateTimeZone timeZone) {
        long adjustedTimeStamp = timeZone.convertUTCToLocal(timestamp);
        long partitionedTime = (adjustedTimeStamp / timeGranularityMs) * timeGranularityMs;
        return timeZone.convertLocalToUTC(partitionedTime, false);
    }

    /**
     * Builds the S3 key prefix ({@code year=.../month=.../day=.../hour=.../}) for a
     * partition timestamp.
     */
    private static String getPartitionPrefix(long timeMillis) {
        String pathPattern = "'year='yyyy/'month='MM/'day='dd/'hour='HH/";
        return encodeTimestamp(DURATION_MILLISECOND,
                pathPattern, DATE_TIME_ZONE_STRING, timeMillis);
    }

    /**
     * Uploads a single file to S3.
     *
     * @return always {@code true}; failures propagate as runtime exceptions from
     *         the AWS SDK and are handled by the caller
     */
    private static boolean uploadFile(String bucketName, File file, String filename) {
        // FIX: use the logger instead of System.out for progress reporting.
        logger.info("uploadFile " + filename);
        S3.putObject(bucketName, filename, file);
        logger.info("uploadFile " + filename + " succeed!");
        return true;
    }

    /**
     * Converts a decoded message into a parquet {@link Group}.
     *
     * <p>NOTE(review): this is clearly a stub — it ignores {@code msg} entirely and
     * only appends an empty {@code event_time}; the real field mapping from the
     * decoded payload to {@link #EVENTS_MESSAGE_TYPE} still needs to be implemented.
     *
     * @param msg         decoded event payload (currently unused)
     * @param messageType target parquet schema
     * @return the populated group, or {@code null} when conversion fails
     */
    private static Group msg2Group(String msg, MessageType messageType) {
        try {
            Group group = new SimpleGroupFactory(messageType).newGroup();
            group.append("event_time", "");
            return group;
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
        }
        return null;
    }


}
