package com.yeming.flink.practice.sink

import com.yeming.flink.practice.source.{MyCustomerSource, StationLog}
import org.apache.flink.api.common.serialization.SimpleStringEncoder
import org.apache.flink.core.fs.Path
import org.apache.flink.runtime.state.filesystem.FsStateBackend
import org.apache.flink.streaming.api.environment.CheckpointConfig.ExternalizedCheckpointCleanup
import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy
import org.apache.flink.streaming.api.scala._

object HdfsFileSink {

  /**
   * Requirement: use the custom source as the data source, write station logs
   * to HDFS, and roll over to a new part file every two seconds.
   *
   * @param args command-line arguments (unused)
   */
  def main(args: Array[String]): Unit = {

    val streamEnv: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
    streamEnv.setParallelism(1)
    // StreamingFileSink requires checkpointing: in-progress part files are only
    // committed (moved to finished state) when a checkpoint completes.
    streamEnv.enableCheckpointing(10000)
    streamEnv.setStateBackend(new FsStateBackend("file:///E:/Data/flink/checkpoints"))
    streamEnv.getCheckpointConfig.enableExternalizedCheckpoints(ExternalizedCheckpointCleanup.DELETE_ON_CANCELLATION)

    // Read from the custom data source.
    val stream: DataStream[StationLog] = streamEnv.addSource(new MyCustomerSource)

    // Bucket assignment defaults to one directory (bucket) per hour.
    // Rolling policy: start a new part file every 2 seconds, or after
    // 2 seconds of inactivity in the bucket.
    val rolling: DefaultRollingPolicy[StationLog, String] = DefaultRollingPolicy.builder()
      .withInactivityInterval(2000) // roll when the bucket has been inactive for 2 s
      .withRolloverInterval(2000)   // roll at most every 2 s
      .build()

    // BUG FIX: the original code discarded the result of
    // withRollingPolicy(...).withBucketCheckInterval(...) and called build() on the
    // unconfigured builder. The builder configuration methods return the configured
    // builder, so they must be chained into build() for the rolling policy and
    // bucket-check interval to reliably take effect.
    val hdfsSink: StreamingFileSink[StationLog] = StreamingFileSink
      .forRowFormat[StationLog](
        new Path("hdfs://f1:9000/FlinkSink01/"),
        new SimpleStringEncoder[StationLog]("UTF-8"))
      .withRollingPolicy(rolling)
      .withBucketCheckInterval(1000) // inspect buckets for rollable files every second
      .build()

    stream.addSink(hdfsSink)

    streamEnv.execute()

  }

}
