package com.shujia.flink.tf

import org.apache.flink.api.common.RuntimeExecutionMode
import org.apache.flink.streaming.api.scala._

object Demo2TF {

  /**
   * Flink transformation demo: counts male students per class.
   *
   * Pipeline: bounded text source -> filter -> map -> keyBy -> reduce -> print.
   */
  def main(args: Array[String]): Unit = {
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    // The file source is bounded, so BATCH mode lets Flink run this as a batch job.
    env.setRuntimeMode(RuntimeExecutionMode.BATCH)

    // One student record per line, comma-separated; field 3 = gender, field 4 = class.
    val students: DataStream[String] = env.readTextFile("data/students.txt")

    /**
     * filter: keeps a record when the predicate returns true, drops it on false.
     * Here we keep only male students.
     */
    val males: DataStream[String] = students.filter { line =>
      "男".equals(line.split(",")(3))
    }

    /**
     * map: one record in, one record out; the output type may differ from the input.
     * Project each record down to a (class, 1) pair for counting.
     */
    val classOnes: DataStream[(String, Int)] = males.map { line =>
      val fields: Array[String] = line.split(",")
      (fields(4), 1)
    }

    /**
     * keyBy: routes records with the same key to the same downstream task.
     */
    val byClass: KeyedStream[(String, Int), String] = classOnes.keyBy(_._1)

    /**
     * sum: per-key aggregation; only available after keyBy.
     */
    //val sumDS: DataStream[(String, Int)] = byClass.sum(1)
    //sumDS.print()

    /**
     * reduce: incrementally combines records that share a key.
     * Both inputs always carry the same key, so we keep one and add the counts.
     */
    val counts: DataStream[(String, Int)] = byClass.reduce { (left, right) =>
      (right._1, left._2 + right._2)
    }

    counts.print()

    env.execute()
  }

}
