package com.hu.hdfs

import java.util.concurrent.TimeUnit

import org.apache.flink.api.common.restartstrategy.RestartStrategies
import org.apache.flink.api.common.serialization.SimpleStringEncoder
import org.apache.flink.core.fs.Path
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink


/**
 * @Author: hujianjun
 * @Create Date: 2021/1/19 11:46
 * @Describe: Row-format (plain-text) file storage to HDFS
 */
object RowFormatFile2HDFS {

  /**
   * Streams text lines from a socket source and writes them to a row-format
   * (plain text) file sink via Flink's `StreamingFileSink`.
   *
   * The previously hard-coded settings are now overridable via CLI args,
   * with the original values kept as defaults so existing invocations
   * behave identically:
   *   - args(0): socket host   (default: "localhost")
   *   - args(1): socket port   (default: 6666)
   *   - args(2): output path   (default: "file:///D:/hujianjun/T/test/hu/")
   *
   * NOTE(review): the default output path targets the local filesystem even
   * though the class name says HDFS — pass an `hdfs://...` URI as args(2)
   * to actually write to HDFS; confirm intended target with the author.
   */
  def main(args: Array[String]): Unit = {
    val host       = if (args.length > 0) args(0) else "localhost"
    val port       = if (args.length > 1) args(1).toInt else 6666
    val outputPath = if (args.length > 2) args(2) else "file:///D:/hujianjun/T/test/hu/"

    val env = StreamExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(1)

    // Restart strategy: up to 3 attempts with a 5-second delay between them.
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(
      3,                            // number of restart attempts
      TimeUnit.SECONDS.toMillis(5)  // delay between attempts
    ))

    val inputStream = env.socketTextStream(host, port).uid("s1").name("读取socket数据源")

    // Row-format sink: the in-progress part file is rolled over as soon as
    // any of the thresholds in the rolling policy below is reached.
    val hadoopSink = StreamingFileSink
      .forRowFormat(new Path(outputPath), new SimpleStringEncoder[String]("UTF-8"))
      .withRollingPolicy(
        DefaultRollingPolicy.builder()
          .withRolloverInterval(TimeUnit.MINUTES.toMillis(15))  // roll at least every 15 minutes
          .withInactivityInterval(TimeUnit.MINUTES.toMillis(5)) // roll after 5 minutes without new records
          .withMaxPartSize(1024 * 100)                          // roll once a part file exceeds ~100 KB
          .build())
      .build()

    inputStream.addSink(hadoopSink)

    env.execute("RowFormatFile2HDFS")
  }
}