package com.shujia.transformation

import org.apache.flink.api.common.functions.ReduceFunction
import org.apache.flink.streaming.api.scala._

/**
 * Flink DataStream demo: compute the maximum student age per class
 * using `keyBy` + `reduce`.
 *
 * Input file `Flink/data/stu/students.txt` is assumed to be CSV lines of
 * (id, name, age, gender, clazz) — TODO confirm column order against the data file.
 */
object Demo05Reduce {
  def main(args: Array[String]): Unit = {
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    // Single task slot so the printed output order is easy to follow in a demo.
    env.setParallelism(1)

    // Read students.txt as a bounded stream of text lines.
    val stuDS: DataStream[String] = env.readTextFile("Flink/data/stu/students.txt")

    // Per-class maximum age.
    stuDS
      .map(line => {
        val stuArr: Array[String] = line.split(",")
        // (id, name, age, gender, clazz)
        (stuArr(0), stuArr(1), stuArr(2), stuArr(3), stuArr(4))
      })
      .keyBy(_._5) // key by class
      .reduce(new ReduceFunction[(String, String, String, String, String)] {
        // Records arrive one at a time; reduce merges two adjacent records of
        // the same key into one, keeping the record with the larger age.
        override def reduce(value1: (String, String, String, String, String), value2: (String, String, String, String, String)): (String, String, String, String, String) = {
          // BUGFIX: ages were compared as Strings (lexicographic), so e.g.
          // "9" > "10". Compare numerically instead.
          val age1: Int = value1._3.toInt
          val age2: Int = value2._3.toInt
          if (age1 >= age2) {
            value1
          } else {
            value2
          }
        }
      }).print()

    env.execute()
  }

}
