package com.shujia.flink

import org.apache.flink.api.scala.ExecutionEnvironment
import org.apache.flink.api.scala._
import org.apache.flink.core.fs.FileSystem.WriteMode

object BatchDemo1 {

  /**
   * Flink DataSet (batch) API demo.
   *
   * Reads `data/students.txt`, maps each CSV line to a (key, value) pair,
   * and demonstrates grouping/aggregation, eager actions (print / collect /
   * count), `distinct`, and a file sink triggered by `env.execute()`.
   */
  def main(args: Array[String]): Unit = {

    // Create the Flink batch execution environment (the job context).
    val env = ExecutionEnvironment.getExecutionEnvironment

    // Read the input file line by line -> DataSet[String].
    val lines = env.readTextFile("data/students.txt")
    // Other common transformations, for reference:
    //   lines.map() / lines.flatMap() / lines.mapPartition() / lines.filter()

    // Parse each CSV line into (field 4, field 2 as Int).
    // NOTE(review): assumes every line has at least 5 comma-separated fields
    // and that field 2 is numeric — malformed lines will throw. Verify
    // against the actual students.txt schema.
    val keyDS = lines.map(line => {
      val split = line.split(",")
      (split(4), split(2).toInt)
    })

    // print() is an eager action in the DataSet API: it runs the job
    // immediately and prints the result to stdout.
    keyDS.print()

    // groupBy must be followed by an aggregation (sum / min / max / ...).
    keyDS
      .groupBy(0)
      //      .max(1) // or .min(1)
      .sum(1)
      .print()

    keyDS.max(1).print()

    // Eager actions that return values to the driver. The originals
    // discarded the results, which made these executions pointless —
    // surface them instead.
    val collected = keyDS.collect() // materialize the DataSet as a Scala Seq
    println(s"collected ${collected.size} records")
    println(s"count = ${keyDS.count()}")

    // distinct() is a lazy transformation: without a sink it never runs.
    // Attach print() so the call actually does something.
    keyDS.distinct().print()

    // OVERWRITE lets the job be re-run without failing because the output
    // path already exists (WriteMode was imported but previously unused).
    keyDS
      .writeAsText("C:\\bigdata\\bigdata\\data\\flink\\out", WriteMode.OVERWRITE)

    // Trigger execution of the lazily defined sink (writeAsText).
    env.execute()

  }
}
