
import java.util.Properties
import java.util.concurrent.TimeUnit
import cn.getech.data.development.bean.FlinkStreamSyncHiveObj
import org.apache.flink.api.common.serialization.{SimpleStringEncoder, SimpleStringSchema}
import org.apache.flink.core.fs.Path
import org.apache.flink.streaming.api. TimeCharacteristic
import org.apache.flink.streaming.api.functions.sink.filesystem.bucketassigners.BasePathBucketAssigner
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy
import org.apache.flink.streaming.api.functions.sink.filesystem.{OutputFileConfig, StreamingFileSink}
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer
import org.slf4j.{Logger, LoggerFactory}


object FlinkDStreamSyncHDFSMain {

  private val logger: Logger = LoggerFactory.getLogger(this.getClass)

  /**
   * Entry point: loads the job configuration, consumes String records from
   * Kafka and mirrors them to HDFS through a row-format [[StreamingFileSink]].
   *
   * @param args optional — args(0) is the path to the job-config JSON; when
   *             absent a bundled default config is used.
   */
  def main(args: Array[String]): Unit = {
    val obj = setupDataSource(args)

    val conf = new org.apache.flink.configuration.Configuration()
    val env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(conf)

    // Kafka consumer configuration: enable partition discovery and plain
    // String (de)serialization, then overlay any per-job extra properties.
    val prop = new Properties
    prop.setProperty("flink.partition-discovery.interval-millis", "30000")
    prop.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
    prop.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
    // NOTE(review): key comes from `field_name` but the VALUE comes from
    // `class_type` — confirm `class_type` really carries the property value.
    obj.kafkaResource.params.foreach(x => prop.setProperty(x.field_name, x.class_type))

    val kafkaConsumer = new FlinkKafkaConsumer[String](obj.kafkaResource.topic, new SimpleStringSchema, prop)
    kafkaConsumer.setCommitOffsetsOnCheckpoints(true)
    kafkaConsumer.setStartFromEarliest()

    // Prefix part files with the job start timestamp so reruns do not collide.
    val config = OutputFileConfig
      .builder()
      .withPartPrefix(System.currentTimeMillis().toString)
      .withPartSuffix(".txt")
      .build()

    // TODO(review): hard-coded test partition path — should be sourced from
    // the job configuration (`obj`) instead of a literal.
    val path = new Path("hdfs://bigdata-test-2/user/hive/warehouse/test.db/real_test_1221_cover/dt=16100")
    val sink: StreamingFileSink[String] = StreamingFileSink
      .forRowFormat(path, new SimpleStringEncoder[String]("UTF-8"))
      .withBucketAssigner(new BasePathBucketAssigner())
      .withRollingPolicy(
        DefaultRollingPolicy.builder()
          .withRolloverInterval(TimeUnit.MINUTES.toMillis(5))   // roll every 5 minutes
          .withInactivityInterval(TimeUnit.MINUTES.toMillis(5)) // roll after 5 minutes of inactivity
          .withMaxPartSize(128 * 1024 * 1024)                   // roll at 128 MB (old comment wrongly said 10mb)
          .build())
      .withOutputFileConfig(config)
      .build()

    import org.apache.flink.streaming.api.scala._
    val ds: DataStream[String] = env.addSource[String](kafkaConsumer)
    ds.print("=====") // debug tap; consider removing for production runs
    ds.addSink(sink)
    env.execute(obj.jobName)
  }

  /**
   * Builds the job configuration from the first CLI argument, falling back to
   * the bundled default JSON path when no argument is supplied.
   *
   * Returns the parsed object instead of mutating a nullable `var`, keeping
   * initialization explicit and NPE-free.
   */
  private def setupDataSource(args: Array[String]): FlinkStreamSyncHiveObj = {
    val parsed =
      if (args.length < 1)
        FlinkStreamSyncHiveObj("data-development-streaming-20201130\\src\\main\\resources\\flink-streaming-hive-sync.json")
      else
        FlinkStreamSyncHiveObj(args(0))
    parsed.jsonParse
    logger.info(parsed.toString)
    parsed
  }
}


