package com.flink.com.sink

import com.flink.com.StationLog
import com.flink.com.source.MyCustomerSource
import javax.swing.DefaultRowSorter
import org.apache.flink.api.common.serialization.SimpleStringEncoder
import org.apache.flink.core.fs.Path
import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy
import org.apache.flink.streaming.api.operators.StreamSink
import org.apache.flink.streaming.api.scala.{DataStream, StreamExecutionEnvironment}

/**
 * Writes the stream of [[StationLog]] records produced by [[MyCustomerSource]]
 * to HDFS using a row-format [[StreamingFileSink]].
 */
object HdfsSink {
  def main(args: Array[String]): Unit = {

    val env = StreamExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(1)
    // FIX: StreamingFileSink finalizes (commits) part files only on successful
    // checkpoints. Without checkpointing enabled, every part file stays in the
    // in-progress/pending state forever and no finished file ever appears in
    // HDFS. Checkpoint every second so files are committed promptly.
    env.enableCheckpointing(1000)

    // 1. Source: custom generator emitting StationLog records.
    val stream: DataStream[StationLog] = env.addSource(new MyCustomerSource)

    /**
     * 2. Rolling policy:
     *  - by default a new bucket (directory) is created per hour;
     *  - withInactivityInterval: roll the part file after 2 s with no writes;
     *  - withRolloverInterval: start a new part file every 2 s regardless.
     */
    val rolling: DefaultRollingPolicy[StationLog, String] = DefaultRollingPolicy.create()
      .withInactivityInterval(2000)
      .withRolloverInterval(2000)
      .build()

    // 3. HDFS sink: row format, each record encoded via toString in UTF-8.
    val myhdfsSink = StreamingFileSink.forRowFormat[StationLog](
      new Path("hdfs://master01.zta.com:8020/finkHdfs"), new SimpleStringEncoder[StationLog]("UTF-8"))
      .withRollingPolicy(rolling)
      .withBucketCheckInterval(1000) // how often (ms) rolling/bucket conditions are re-checked
      .build()

    stream.addSink(myhdfsSink)
    env.execute()

  }

}
