package com.shujia.flink.tf
import org.apache.flink.api.common.RuntimeExecutionMode
import  org.apache.flink.streaming.api.scala._
object Demo01TF {

  /**
   * Flink DataStream demo: count male students per class.
   *
   * Pipeline: read lines -> split fields -> filter males -> map to (class, 1)
   * -> keyBy class -> reduce (sum) -> print.
   */
  def main(args: Array[String]): Unit = {
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    // The input is bounded (a file), so run in BATCH mode to enable
    // batch-style scheduling and blocking shuffles.
    env.setRuntimeMode(RuntimeExecutionMode.BATCH)

    // Each line is expected to be: id,name,age,gender,clazz
    // NOTE(review): field layout inferred from the indices used below — confirm against the data file.
    val stuDS: DataStream[String] = env.readTextFile("data/students.txt")

    /**
     * Split each line exactly once so downstream operators reuse the fields.
     * (Previously the line was split in filter and split again in map.)
     */
    val fieldsDS: DataStream[Array[String]] = stuDS.map(_.split(","))

    /**
     * filter: keeps an element when the predicate returns true,
     * drops it when the predicate returns false.
     * Here: keep only male students (gender is field index 3).
     */
    val maleDS: DataStream[Array[String]] = fieldsDS.filter(fields => "男".equals(fields(3)))

    /**
     * map: one output element per input element; the output type may differ.
     * Emit a (class, 1) pair per male student for counting.
     */
    val kvDS: DataStream[(String, Int)] = maleDS.map(fields => (fields(4), 1))

    /**
     * keyBy: routes records with the same key to the same downstream task,
     * producing a KeyedStream required by per-key aggregations.
     */
    val keyDS: KeyedStream[(String, Int), String] = kvDS.keyBy(_._1)

    /**
     * sum: built-in per-key aggregation; only usable after keyBy.
     * Kept commented out — equivalent to the reduce below.
     */
//    val sumDS: DataStream[(String, Int)] = keyDS.sum(1)
//    sumDS.print()

    /**
     * reduce: incrementally combines elements that share the same key.
     * Keeps the key and sums the counts.
     */
    val reduceDS: DataStream[(String, Int)] = keyDS.reduce((acc, next) => (acc._1, acc._2 + next._2))

    reduceDS.print()

    // Launch the job: the pipeline above is only a lazy plan until execute() is called.
    env.execute()
  }

}
