package cn.edu.flink.scala.tutorial.state.statebackend

import org.apache.flink.api.common.functions.RichMapFunction
import org.apache.flink.api.common.state.{MapState, MapStateDescriptor}
import org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend
import org.apache.flink.streaming.api.functions.source.SourceFunction
import org.apache.flink.streaming.api.scala._

object RocksDBStateBackendTest {
  /**
   * Demonstrates the RocksDB state backend ([[EmbeddedRocksDBStateBackend]]):
   * an endless counting source feeds a keyed map function that writes
   * ever-growing strings into `MapState`, so keyed state is managed by
   * RocksDB and spilled to disk rather than kept on the JVM heap.
   */
  def main(args: Array[String]): Unit = {
    val env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI()
    env.setParallelism(1)
    // Use RocksDB as the state backend: state is handed to RocksDB and
    // stored on disk instead of in JVM heap memory.
    env.setStateBackend(new EmbeddedRocksDBStateBackend())
    // env.enableCheckpointing(1000L)
    // Configure the checkpoint storage location.
    // env.getCheckpointConfig.setCheckpointStorage("file:///D:/tmp")

    val valueStream = env.addSource(new SourceFunction[Long] {
      // Cooperative-cancellation flag. Must be @volatile because cancel()
      // is invoked from a different thread than run().
      @volatile private var running = true

      override def run(ctx: SourceFunction.SourceContext[Long]): Unit = {
        var i = 0L
        // Check the flag on every iteration so the job can shut down
        // cleanly. (The original `while (true)` combined with
        // `cancel(): Unit = ???` made this source uncancellable and threw
        // NotImplementedError when Flink tried to stop the job.)
        while (running) {
          i += 1
          ctx.collect(i)
        }
      }

      override def cancel(): Unit = running = false
    })

    valueStream
      // Route every element to the same key so a single MapState instance
      // accumulates all entries — sufficient for this state-backend demo.
      .keyBy(x => true)
      .map(new RichMapFunction[Long, String] {
        // Keyed MapState handle; `lazy` so getRuntimeContext is only
        // touched after the function has been opened on the task manager.
        lazy val mapState: MapState[Long, String] =
          getRuntimeContext.getMapState(
            new MapStateDescriptor[Long, String]("map state", classOf[Long], classOf[String]))

        override def map(value: Long): String = {
          // Store `value` repeated `value` times so the state footprint
          // grows quadratically, exercising RocksDB's on-disk storage.
          val builder = new StringBuilder
          for (i <- 1L to value) builder.append(value)
          mapState.put(value, builder.toString)
          value.toString
        }
      })
      .print()

    env.execute("RocksDBStateBackendTest")
  }
}
