package com.shujia.flink.core

import org.apache.flink.streaming.api.functions.KeyedProcessFunction
import org.apache.flink.streaming.api.scala._
import org.apache.flink.util.Collector

import scala.collection.mutable

object Demo8KeyByProcess {
  def main(args: Array[String]): Unit = {
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    // Raw text lines from a socket source on host "master", port 8888.
    val lineStream: DataStream[String] = env.socketTextStream("master", 8888)

    // Split each comma-separated line into individual words.
    val wordStream: DataStream[String] = lineStream.flatMap(line => line.split(","))

    // Partition the stream so every occurrence of the same word is routed
    // to the same parallel subtask.
    val keyedWords: KeyedStream[String, String] = wordStream.keyBy(word => word)

    val wordCounts: DataStream[(String, Int)] = keyedWords
      .process(new KeyedProcessFunction[String, String, (String, Int)] {

        /*
         * A single `var count` here would be shared by every key routed to
         * this task, so counts of different words would collide. A per-word
         * map avoids that.
         *
         * NOTE(review): this map lives only in TaskManager heap memory — it
         * is NOT checkpointed, so all counts are lost on failure/restart.
         * Flink managed keyed state (e.g. ValueState) would be the
         * fault-tolerant alternative; this demo intentionally shows the
         * plain-memory approach.
         */
        val countsByWord = new mutable.HashMap[String, Int]()

        /**
         * Called once per element. Because the input stream is keyed, each
         * invocation runs in the context of the current element's key.
         *
         * @param word a single word (one element produced by the flatMap split)
         * @param ctx  runtime context for the current key / timers
         * @param out  collector used to emit results downstream
         */
        override def processElement(word: String,
                                    ctx: KeyedProcessFunction[String, String, (String, Int)]#Context,
                                    out: Collector[(String, Int)]): Unit = {
          // Increment the running count for this word and persist it back.
          val updated: Int = countsByWord.getOrElse(word, 0) + 1
          countsByWord.put(word, updated)

          // Emit the updated running count downstream.
          out.collect((word, updated))

          println(countsByWord)

        }
      })

    wordCounts.print()

    env.execute()
  }

}
