package com.shujia.state

import org.apache.flink.api.common.functions.{RichMapFunction, RuntimeContext}
import org.apache.flink.api.common.state.{ValueState, ValueStateDescriptor}
import org.apache.flink.configuration.Configuration
import org.apache.flink.streaming.api.scala._

object Demo2ValueState {

  /**
    * Demo job: word count over a socket stream using Flink keyed state.
    * Start the text source first with: nc -lk 8888
    */
  def main(args: Array[String]): Unit = {
    // Set up the Flink streaming execution environment.
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    // Run the job with two parallel subtasks.
    env.setParallelism(2)

    // Read raw lines from the socket source.
    val lines: DataStream[String] = env.socketTextStream("master", 8888)

    // Split comma-separated lines into words, pair each word with an
    // initial count of 1, and partition the stream by the word itself.
    val keyedWords: KeyedStream[(String, Int), String] =
      lines
        .flatMap(_.split(","))
        .map((_, 1))
        .keyBy(_._1)

    // Compute running counts with a stateful map function
    // (one ValueState instance per distinct key).
    val counts: DataStream[(String, Int)] = keyedWords.map(new MyRichMapFunction)

    counts.print()

    env.execute()
  }
}


/**
  * Stateful word counter: keeps a running total per key in Flink managed state.
  *
  * State is essentially a variable (value, list, or map). Unlike an ordinary
  * field, state is persisted (e.g. to HDFS) by checkpoints, so if the job
  * fails mid-run it can restart and continue counting from where it left off.
  * The state lives in memory and is durably saved on each checkpoint.
  */
class MyRichMapFunction extends RichMapFunction[(String, Int), (String, Int)] {

  // Running count for the current key; wired up in open().
  var valueState: ValueState[Int] = _

  /**
    * Registers the state with the runtime. Because the stream was keyed
    * upstream, Flink maintains one independent copy of this state per key.
    */
  override def open(parameters: Configuration): Unit = {
    // Describe the state (name + value type) ...
    val descriptor = new ValueStateDescriptor[Int]("count", classOf[Int])
    // ... then obtain (or lazily create) it from the runtime context.
    valueState = getRuntimeContext.getState(descriptor)
  }

  /**
    * Adds the incoming count to the stored total for the current key,
    * writes the new total back to state, and emits it downstream.
    */
  override def map(value: (String, Int)): (String, Int) = {
    // Previous total for this key (uninitialized state unboxes to 0).
    val previous: Int = valueState.value()

    // Fold in the count carried by this record.
    val updated: Int = previous + value._2

    // Persist the new total.
    valueState.update(updated)

    // Forward (word, running total) to the next operator.
    (value._1, updated)
  }
}
