package com.shujia.core

import java.util.Properties

import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.api.scala._
import org.apache.flink.runtime.state.filesystem.FsStateBackend
import org.apache.flink.streaming.api.environment.CheckpointConfig.ExternalizedCheckpointCleanup
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer08

object Demo2SavePoint {

  /**
    * Demonstrates Flink savepoints with a Kafka (0.8) word-count job.
    *
    * Trigger a savepoint manually:
    *   flink savepoint <jobId> hdfs://node1:9000/flink/savepoint -yid <yarnAppId>
    *   e.g. flink savepoint b5e667470b34036421a8266ee1fec874 hdfs://node1:9000/flink/savepoint
    *        -yid application_1574587724470_0009
    *
    * Resume the job from a savepoint:
    *   flink run -c com.shujia.core.Demo2SavePoint -m yarn-cluster -p 1 \
    *     -s hdfs://node1:9000/flink/savepoint/savepoint-b5e667-6719876b0051 \
    *     flink-1.0-SNAPSHOT.jar
    */
  def main(args: Array[String]): Unit = {

    val env = StreamExecutionEnvironment.getExecutionEnvironment

    // The Kafka 0.8 consumer still talks to ZooKeeper in addition to the brokers.
    val props = new Properties()
    props.setProperty("zookeeper.connect", "node2:2181,node3:2181,node4:2181")
    props.setProperty("bootstrap.servers", "node2:9092,node3:9092,node4:9092")
    props.setProperty("group.id", "asdasd")

    // Source: read the "flinkcp" topic as plain strings, starting from the earliest offset.
    val consumer = new FlinkKafkaConsumer08[String]("flinkcp", new SimpleStringSchema, props)
    consumer.setStartFromEarliest()

    val lines = env.addSource(consumer)

    // Comma-separated word count. Each stateful operator carries a stable uid so its
    // state can be matched back to the operator when restoring from a savepoint.
    val wordCounts = lines
      .flatMap(_.split(",")).uid("flatMap")
      .map(w => (w, 1)).uid("map")
      // .filter(w => !w._1.equals("spark"))   // example: logic added after taking a savepoint
      .keyBy(_._1)
      .reduce((acc, cur) => (acc._1, acc._2 + cur._2)).uid("reduce")

    wordCounts.print()
    env.execute()
  }
}
