package com.hzh.flink.sink

import org.apache.flink.api.common.serialization.SimpleStringEncoder
import org.apache.flink.configuration.MemorySize
import org.apache.flink.connector.file.sink.FileSink
import org.apache.flink.core.fs.Path
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy
import org.apache.flink.streaming.api.scala._

import java.time.Duration
/**
 * Streaming word-count-style job: counts students per group (the 5th
 * comma-separated column of each input row) and writes the running counts
 * to disk via Flink's `FileSink`.
 *
 * Input : data/students.txt  — CSV rows; column index 4 is the grouping key
 * Output: data/flink/counts  — rolling part files, one "(key,count)" line per update
 */
object Demo1FileSink {
  def main(args: Array[String]): Unit = {
    /**
     * 1. Create the streaming execution environment.
     */
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    val studentDS: DataStream[String] = env.readTextFile("data/students.txt")

    // Running count per key. Split each line once, then drop rows with fewer
    // than 5 fields (blank or malformed lines) instead of letting an
    // ArrayIndexOutOfBoundsException kill the job.
    val countDS: DataStream[(String, Int)] = studentDS
      .map(_.split(","))
      .filter(_.length > 4)
      .map(fields => (fields(4), 1))
      .keyBy(_._1)
      .sum(1)

//    countDS.writeAsText("data/flink/counts")

    // Row-format sink; the rolling policy decides when the current part file
    // is closed and a new one is started.
    val sink: FileSink[(String, Int)] = FileSink
      .forRowFormat(new Path("data/flink/counts"), new SimpleStringEncoder[(String, Int)]("UTF-8"))
      .withRollingPolicy(
        DefaultRollingPolicy.builder()
          // roll after the part file has been open for at least this long
          .withRolloverInterval(Duration.ofSeconds(10))
          // roll after no new records have arrived for this long
          .withInactivityInterval(Duration.ofSeconds(10))
          // roll once the part file reaches this size
          .withMaxPartSize(MemorySize.ofMebiBytes(1))
          .build())
      .build()

    // Attach the FileSink to the counted stream.
    countDS.sinkTo(sink)

    // The pipeline above is only a lazily-built plan; execute() submits it.
    env.execute()
  }

}
