package org.yuanzheng.sink

import org.apache.flink.api.common.serialization.SimpleStringEncoder
import org.apache.flink.core.fs.Path
import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.streaming.api.scala._
import org.yuanzheng.source.{CustomSource, StationLog}

/**
 * @author yuanzheng
 * @date 2020/6/15-13:28
 */
/**
 * Flink job that reads [[StationLog]] records from a [[CustomSource]] and
 * writes them to HDFS as row-encoded text files via [[StreamingFileSink]].
 */
object HDFSFileSink {
  // 1. Initialize the stream execution environment.
  //    Parallelism 1 keeps the output in a single writer per bucket.
  val streamEnv: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
  streamEnv.setParallelism(1)

  // 2. Read from the custom data source.
  val stream: DataStream[StationLog] = streamEnv.addSource(new CustomSource)

  // 3. Rolling policy (default bucketing: one directory per hour).
  val rolling: DefaultRollingPolicy[StationLog, String] = DefaultRollingPolicy.create()
    .withInactivityInterval(2000) // roll the part file after 2s of inactivity
    .withRolloverInterval(2000)   // roll a new part file every 2s
    .build()

  // 4. Build the HDFS sink.
  //    NOTE: charset must be "UTF-8" — the previous "UTF=8" is not a valid
  //    charset name and would throw UnsupportedCharsetException at runtime.
  val hdfsSink: StreamingFileSink[StationLog] = StreamingFileSink.forRowFormat[StationLog](
    new Path("hdfs://192.168.1.100:9000/hdfssink/"),
    new SimpleStringEncoder[StationLog]("UTF-8"))
    .withRollingPolicy(rolling)
    .withBucketCheckInterval(1000) // check bucket state every 1s
    .build()

  /**
   * Job entry point: wires the sink into the pipeline and submits the job.
   * Without a `main` method the object initializer would never run when the
   * jar is submitted to a Flink cluster, so the job could not start.
   */
  def main(args: Array[String]): Unit = {
    stream.addSink(hdfsSink)
    streamEnv.execute()
  }
}
