package com.shujia.core

import java.util.Properties

import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.api.scala._
import org.apache.flink.runtime.state.filesystem.FsStateBackend
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer08
import org.apache.flink.streaming.api.environment.CheckpointConfig.ExternalizedCheckpointCleanup
object Demo1Checkpoint {

  /**
    * Streaming word-count job reading comma-separated lines from Kafka,
    * with periodic checkpoints persisted to HDFS.
    *
    * To run on a cluster, set in flink-conf.yaml:
    *   state.backend: filesystem
    */
  def main(args: Array[String]): Unit = {

    val environment = StreamExecutionEnvironment.getExecutionEnvironment

    // Take a checkpoint every 10 seconds.
    environment.enableCheckpointing(10000)

    // Retain externalized checkpoints even after the job is cancelled,
    // so the job can later be restored from the last checkpoint.
    environment.getCheckpointConfig
      .enableExternalizedCheckpoints(ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION)

    // Persist checkpoint state to an external filesystem (HDFS).
    environment.setStateBackend(new FsStateBackend("hdfs://node1:9000/flink/checkpoint"))

    // Kafka 0.8 consumer configuration (ZooKeeper-based offset handling).
    val consumerConfig = new Properties()
    consumerConfig.setProperty("zookeeper.connect", "node2:2181,node3:2181,node4:2181")
    consumerConfig.setProperty("bootstrap.servers", "node2:9092,node3:9092,node4:9092")
    consumerConfig.setProperty("group.id", "asdasda")

    // Source: consume the "flinkcp" topic as plain strings, starting
    // from the earliest available offset.
    val consumer = new FlinkKafkaConsumer08[String]("flinkcp", new SimpleStringSchema, consumerConfig)
    consumer.setStartFromEarliest()

    val lines = environment.addSource(consumer)

    // Split each line on commas and keep a running count per word.
    val wordCounts = lines
      .flatMap(line => line.split(","))
      .map((_, 1))
      .keyBy(_._1)
      .reduce((left, right) => (left._1, left._2 + right._2))

    wordCounts.print()

    environment.execute()
  }
}
