package com.shujia.state

import org.apache.flink.streaming.api.functions.KeyedProcessFunction
import org.apache.flink.streaming.api.scala._
import org.apache.flink.util.Collector

import scala.collection.mutable

/**
  * Word-count demo that keeps its running totals in an ordinary in-memory
  * map rather than Flink managed state. It exists to show the weakness of
  * that approach: the map lives on the TaskManager heap, so if the job
  * fails or the TaskManager dies, every accumulated count is lost.
  */
object Demo1NoState {
  def main(args: Array[String]): Unit = {

    // Set up the Flink streaming execution environment.
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    // Run with two parallel subtasks.
    env.setParallelism(2)

    // Consume text lines from a socket.
    // Start the source first with: nc -lk 8888
    val lines: DataStream[String] = env.socketTextStream("master", 8888)

    // Split each comma-separated line into words, pair every word with an
    // initial count of 1, then partition the stream by the word itself.
    val keyedWords: KeyedStream[(String, Int), String] = lines
      .flatMap(_.split(","))
      .map(word => (word, 1))
      .keyBy(_._1)

    val counted: DataStream[(String, Int)] = keyedWords.process(
      new KeyedProcessFunction[String, (String, Int), (String, Int)] {

        /**
          * Running totals, held in the TaskManager's memory only.
          * Not checkpointed: a task failure wipes this map — which is
          * exactly the problem this demo illustrates.
          */
        val counters = new mutable.HashMap[String, Int]()

        /**
          * Called once for every incoming element; elements with the same
          * key arrive at the same subtask.
          *
          * @param value the incoming (word, 1) pair
          * @param ctx   runtime context for the current key
          * @param out   collector used to emit results downstream
          */
        override def processElement(value: (String, Int),
                                    ctx: KeyedProcessFunction[String, (String, Int), (String, Int)]#Context,
                                    out: Collector[(String, Int)]): Unit = {
          val (word, increment) = value

          // Previous total for this word, or 0 the first time it appears.
          val updated: Int = counters.getOrElse(word, 0) + increment

          counters.put(word, updated)

          out.collect((word, updated))
        }
      })

    counted.print()

    env.execute()
  }
}
