//package com.hu.hdfs
//
//import java.time.ZoneId
//import java.util.concurrent.TimeUnit
//
//import com.hu.entity.Sensor
//import org.apache.flink.api.common.restartstrategy.RestartStrategies
//import org.apache.flink.core.fs.Path
//import org.apache.flink.formats.parquet.avro.ParquetAvroWriters
//import org.apache.flink.runtime.state.memory.MemoryStateBackend
//import org.apache.flink.streaming.api.CheckpointingMode
//import org.apache.flink.streaming.api.environment.CheckpointConfig
//import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink
//import org.apache.flink.streaming.api.functions.sink.filesystem.bucketassigners.DateTimeBucketAssigner
//import org.apache.flink.streaming.api.scala._
//import org.apache.parquet.hadoop.metadata.CompressionCodecName
//
///**
// * @Author: hujianjun
// * @Create Date: 2021/1/19 16:35
// * @Describe:
// */
//object Streaming2ColumnFormatFileCompression {
//  def main(args: Array[String]): Unit = {
//    val env = StreamExecutionEnvironment.getExecutionEnvironment
//    env.setParallelism(1)
//    env.enableCheckpointing(1000)
//    val checkpointConfig = env.getCheckpointConfig
//    // checkpointing mode: EXACTLY_ONCE (no duplicates on recovery)
//    checkpointConfig.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE)
//    // minimum pause between consecutive checkpoints, so they never overlap
//    checkpointConfig.setMinPauseBetweenCheckpoints(2000L)
//    // timeout for each checkpoint attempt
//    checkpointConfig.setCheckpointTimeout(20000L)
//    // number of tolerable checkpoint failures before the job fails (0 = fail on the first failure)
//    checkpointConfig.setTolerableCheckpointFailureNumber(0)
//    // whether checkpoint data is retained when the job is CANCELLED (RETAIN_ON_CANCELLATION keeps it)
//    checkpointConfig.enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION)
//
//    // state backend used to store checkpoint state; MemoryStateBackend is the default
//    val stateBackend = new MemoryStateBackend(10 * 1024 * 1024, false)
//    env.setStateBackend(stateBackend)
//
//    // restart (recovery) strategy
//    env.setRestartStrategy(
//      RestartStrategies.fixedDelayRestart(
//        3, // number of restart attempts
//        // delay — NOTE(review): TimeUnit.SECONDS.toMinutes(3) evaluates to 0,
//        // so the restart delay is effectively zero; likely meant TimeUnit.MINUTES.toMillis(3)
//        TimeUnit.SECONDS.toMinutes(3) // delay
//      )
//    )
//
//    val socketStream = env.socketTextStream("localhost", 6666)
//    //    val input = socket.map(f => (f, 1L))
//    //      .keyBy(_._1).sum(1)
//    //      .map(f => new Sensor(f._1, f._2,null,null))
//
//    val bucketAssigner = new DateTimeBucketAssigner("yyyy/MMdd/HH", ZoneId.of("Asia/Shanghai"))
//
//    // NOTE(review): stock Flink's ParquetAvroWriters.forGenericRecord takes an Avro Schema,
//    // not a Class (and no codec argument); for a generated/POJO class like Sensor,
//    // forSpecificRecord / forReflectRecord is the usual choice — verify this overload exists.
//    val streamingFileSink = StreamingFileSink.forBulkFormat(new Path("file:///Users/leohe/Data/output/flinkout/columnformat/"),
//      ParquetAvroWriters.forGenericRecord(classOf[Sensor],CompressionCodecName.SNAPPY))
//      .withBucketCheckInterval(1000 * 60)
//      .withBucketAssigner(bucketAssigner)
//      .build()
//    // NOTE(review): `input` is only defined in the commented-out mapping above — as written
//    // this line would not compile if re-enabled; the mapped Sensor stream (not the raw
//    // socketTextStream of Strings) is what matches the Parquet/Avro sink's element type.
//    input.addSink(streamingFileSink)
//
//    env.execute()
//  }
//}
