package com.shujia.flink.transformaction

import org.apache.flink.api.common.operators.Order
import org.apache.flink.api.scala._

object Demo7Batch {

  /**
    * Batch-processing demo: reads a CSV file of students and shows three
    * DataSet operations — taking the first N records, per-group top-N via
    * sorted groups, and a positional-field aggregation.
    */
  def main(args: Array[String]): Unit = {

    // Batch execution context
    val env = ExecutionEnvironment.getExecutionEnvironment

    // One line per student: id,name,age,gender,clazz
    val lines: DataSet[String] = env.readTextFile("spark/data/students.txt")

    // Parse each CSV line into a Student; the trailing 1 is a constant
    // used later for sum-based counting
    val students: DataSet[Student] = lines.map { line =>
      val fields = line.split(",")
      Student(fields(0), fields(1), fields(2).toInt, fields(3), fields(4), 1)
    }

    lines.first(10) //.print()

    // Top-N per group: the two oldest students of every class
    students
      .groupBy(_.clazz)
      .sortGroup(_.age, Order.DESCENDING)
      .first(2)
    //.print()

    // Group/aggregate by field position (0-based): field 4 is clazz,
    // field 5 is the constant 1 — summing it counts students per class.
    // print() acts as a sink and triggers execution of this batch job.
    students
      .groupBy(4)
      .sum(5)
      .print()
  }

  // Parsed student record; `i` is a constant 1 enabling count-by-sum
  case class Student(id: String, name: String, age: Int, gender: String, clazz: String, i: Int)

}
