package com.cmsr.hdpf.sink.hdfs

import com.cmsr.hdpf.util.ConfigHelper
import org.apache.flink.api.common.serialization.SimpleStringEncoder
import org.apache.flink.core.fs.Path
import org.apache.flink.streaming.api.functions.sink.filesystem.bucketassigners.DateTimeBucketAssigner
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy
import org.apache.flink.streaming.api.functions.sink.filesystem.{OutputFileConfig, StreamingFileSink}

import java.util.concurrent.TimeUnit

object HdfsSink {

  /** Builds a row-format streaming HDFS sink writing UTF-8 text lines to the
    * data lake under `<datalake>/<folder>`, bucketed by date (`yyyy-MM-dd`).
    *
    * @param folder path fragment appended verbatim to the configured data-lake
    *               root (so the root is assumed to carry any needed separator)
    * @param format part-file suffix, passed as-is to `withPartSuffix`
    *               (NOTE(review): no leading "." is added — confirm intended)
    * @return a configured [[StreamingFileSink]] for `String` records
    */
  def GetDataLakeSink(folder: String, format: String): StreamingFileSink[String] = {

    val lakeRoot = ConfigHelper.datalake

    // Roll part files every 60 minutes, after 5 minutes with no incoming
    // records, or once a part reaches 1 GiB — whichever comes first.
    val rollingPolicy = DefaultRollingPolicy
      .builder()
      .withRolloverInterval(TimeUnit.MINUTES.toMillis(60))
      .withInactivityInterval(TimeUnit.MINUTES.toMillis(5))
      .withMaxPartSize(1024 * 1024 * 1024)
      .build()

    // Part-file naming: fixed prefix plus the caller-supplied suffix.
    val fileNaming = OutputFileConfig
      .builder()
      .withPartPrefix("prefix")
      .withPartSuffix(format)
      .build()

    StreamingFileSink
      .forRowFormat(new Path(lakeRoot + folder), new SimpleStringEncoder[String]("UTF-8"))
      .withRollingPolicy(rollingPolicy)
      .withBucketAssigner(new DateTimeBucketAssigner[String]("yyyy-MM-dd"))
      // NOTE(review): 1 ms bucket-check interval is very aggressive
      // (Flink default is 60 s) — confirm this is intentional.
      .withBucketCheckInterval(1)
      .withOutputFileConfig(fileNaming)
      .build()
  }

}
