package com.shujia.sink

import org.apache.flink.api.common.serialization.SimpleStringEncoder
import org.apache.flink.core.fs.Path
import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy
import org.apache.flink.streaming.api.scala._


object Demo01FileSink {

  /**
   * Streaming word count over a socket source, written to rolling files.
   *
   * Reads comma-separated words from `master:8888`, keeps a running count
   * per word, and writes the `(word, count)` tuples as UTF-8 rows under
   * `Flink/data/wc/output`.
   */
  def main(args: Array[String]): Unit = {
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    // IMPORTANT: StreamingFileSink only commits (finalizes) part files on
    // checkpoints. Without checkpointing enabled, output files would stay in
    // the in-progress/pending state forever. Checkpoint every 10 seconds.
    env.enableCheckpointing(10 * 1000)

    // Unbounded stream: one record per line received on the socket.
    val socketDS: DataStream[String] = env.socketTextStream("master", 8888)

    // Word count: split on commas, drop empty tokens (e.g. from "a,,b" or a
    // trailing comma — otherwise "" would be counted as a word), then keep a
    // running sum per word.
    val wordCntDS: DataStream[(String, Int)] = socketDS
      .flatMap(_.split(","))
      .filter(_.nonEmpty)
      .map(word => (word, 1))
      .keyBy(_._1)
      .sum(1)

    // Build the streaming file sink.
    val sink: StreamingFileSink[(String, Int)] = StreamingFileSink
      // Output path and row encoder (one tuple per line, UTF-8).
      .forRowFormat(new Path("Flink/data/wc/output"), new SimpleStringEncoder[(String, Int)]("UTF-8"))
      // Flink writes multiple part files; the rolling policy decides when to
      // start a new one. Any single condition below triggers a roll.
      .withRollingPolicy(
        DefaultRollingPolicy.builder()
          // Roll after the part file has been open for 15 seconds.
          .withRolloverInterval(15 * 1000)
          // Roll after 5 seconds without new data.
          .withInactivityInterval(5 * 1000)
          // Roll once the part file reaches 1 KB.
          .withMaxPartSize(1024)
          .build())
      .build()

    wordCntDS.addSink(sink)

    // Submit the job; blocks until the (unbounded) job terminates.
    env.execute()
  }

}
