package com.shujia.flink.core

import java.util.concurrent.TimeUnit

import org.apache.flink.api.common.serialization.SimpleStringEncoder
import org.apache.flink.core.fs.Path
import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy
import org.apache.flink.streaming.api.scala._

object Demo3FlinkOnHdfs {

  /**
    * Demo: read a bounded stream from HDFS, count students per class,
    * and write the running counts back to HDFS via a StreamingFileSink.
    */
  def main(args: Array[String]): Unit = {
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    /**
      * Read data from HDFS -- a bounded stream.
      */
    val studentDS: DataStream[String] = env.readTextFile("hdfs://master:9000/data/student")

    // Records are comma-separated; field index 4 is the class name.
    // NOTE(review): assumes every line has at least 5 fields — malformed
    // lines would throw ArrayIndexOutOfBoundsException.
    val clazzNumDS: DataStream[(String, Int)] = studentDS
      .map(stu => (stu.split(",")(4), 1))
      .keyBy(_._1)
      .sum(1)

    /**
      * Save the result to HDFS.
      */
    val sink: StreamingFileSink[(String, Int)] = StreamingFileSink
      // Output path and row format (one tuple per line, UTF-8 encoded).
      .forRowFormat(new Path("hdfs://master:9000/data/flink_clazz"), new SimpleStringEncoder[(String, Int)]("UTF-8"))
      .withRollingPolicy(
        DefaultRollingPolicy.builder()
          // BUG FIX: these interval setters take MILLISECONDS. The original
          // passed TimeUnit.MINUTES.toSeconds(...), i.e. 900 and 300, making
          // the sink roll part files every 900ms / after 300ms of inactivity
          // instead of the intended 15 / 5 minutes.
          .withRolloverInterval(TimeUnit.MINUTES.toMillis(15))
          .withInactivityInterval(TimeUnit.MINUTES.toMillis(5))
          // Max part-file size in bytes (1 KiB — tiny demo value kept as-is).
          .withMaxPartSize(1024)
          .build())
      .build()

    clazzNumDS.addSink(sink)

    // Launch the job; execute() blocks until the bounded job finishes.
    env.execute()
  }

}
