package com.shujia.state

import org.apache.flink.api.common.functions.RuntimeContext
import org.apache.flink.api.common.state.{ValueState, ValueStateDescriptor}
import org.apache.flink.configuration.Configuration
import org.apache.flink.streaming.api.functions.KeyedProcessFunction
import org.apache.flink.streaming.api.scala._
import org.apache.flink.util.Collector

object Demo1ValueState {

  def main(args: Array[String]): Unit = {

    // Set up the streaming execution environment.
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    // Read a raw text stream from the socket at master:8888.
    val socketStream: DataStream[String] = env.socketTextStream("master", 8888)

    // Split each comma-separated line into words and pair every word with an initial count of 1.
    val wordPairs: DataStream[(String, Int)] = socketStream
      .flatMap(line => line.split(","))
      .map(word => (word, 1))

    // Key the stream by word and run the stateful counting function; print results to stdout.
    wordPairs
      .keyBy(pair => pair._1)
      .process(new MyKeyedProcessFunction)
      //.sum(1)
      .print()

    // Submit the job for execution.
    env.execute()

  }

}

class MyKeyedProcessFunction extends KeyedProcessFunction[String,(String,Int),(String,Int)]{

  // Running word count, kept in Flink managed keyed state (one value per key).
  var state: ValueState[Int] = _

  /**
    * Runs once before any call to processElement; state handles must be
    * registered here.
    *
    * State is snapshotted with each checkpoint (persisted e.g. to HDFS), so
    * after a failure the restarted job resumes counting from the last snapshot.
    *
    * Keyed state stores one value per key.
    *
    * @param parameters Flink configuration passed to this operator
    */

  override def open(parameters: Configuration): Unit = {

    println("执行open-------------------")

    // The runtime context gives access to Flink's state backend.
    val runtimeContext: RuntimeContext = getRuntimeContext

    // Describe the state cell: a single Int named "count".
    val descriptor = new ValueStateDescriptor[Int]("count", classOf[Int])

    // Obtain the handle to the keyed state.
    state = runtimeContext.getState(descriptor)

  }

  /**
    * Invoked once for every incoming record.
    *
    * @param value the incoming (word, count) pair
    * @param ctx   context object (timers, side outputs, current key)
    * @param out   collector used to emit results downstream
    */
  override def processElement(value: (String, Int), ctx: KeyedProcessFunction[String, (String, Int), (String, Int)]#Context, out: Collector[(String, Int)]): Unit = {

    // Previous total for the current key (state default before the first update).
    val previousTotal: Int = state.value()

    // New running total after adding this record's count.
    val newTotal: Int = previousTotal + value._2

    // Persist the updated total back into keyed state.
    state.update(newTotal)

    // Emit the word together with its updated total.
    out.collect((value._1, newTotal))

  }
}
