package com.shujia.flink.tf

import org.apache.flink.api.common.functions.FilterFunction
import org.apache.flink.api.java.functions.KeySelector
import org.apache.flink.streaming.api.scala._

object Demo4Filter {
  def main(args: Array[String]): Unit = {

    // Obtain the streaming execution environment — the entry point of every Flink job.
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    // Source: read the text file line by line.
    val lines: DataStream[String] = env.readTextFile("flink/data/words.txt")

    // Split each comma-separated line into words, then pair every word with an initial count of 1.
    val wordPairs: DataStream[(String, Int)] = lines
      .flatMap(line => line.split(","))
      .map(word => (word, 1))

    /**
     * keyBy: comparable to groupBy in Spark — routes records with the same key
     * to the same task. A keyBy is usually followed by an aggregation.
     */
    // Java-style API: supply an explicit KeySelector implementation.
    val keyedPairs: KeyedStream[(String, Int), String] = wordPairs
      .keyBy(new KeySelector[(String, Int), String] {
        /**
         * getKey: extracts the grouping key from one record.
         *
         * @param pair one (word, count) tuple from the stream
         * @return the word, used as the partitioning key
         */
        override def getKey(pair: (String, Int)): String = pair._1
      })

    // Scala-style API: the same keying expressed as a lambda (result unused; shown for comparison).
    wordPairs.keyBy(_._1)

    // Aggregate per key: running sum over the count field (tuple position 1).
    val wordCounts: DataStream[(String, Int)] = keyedPairs.sum(1)

    // Sink: print each updated count to stdout.
    wordCounts.print()

    // Trigger the actual job execution (Flink builds the graph lazily up to this point).
    env.execute()
  }

}
