package kk.learn.flink.work._1

import org.apache.flink.api.scala.{AggregateDataSet, DataSet, ExecutionEnvironment}
import org.apache.flink.api.scala._
import org.apache.flink.core.fs.FileSystem.WriteMode

/**
 * <p>
 * Word-count batch job: reads a text file, splits each line on whitespace,
 * counts the occurrences of every word, and writes (word, count) pairs to a
 * CSV output file.
 * </p>
 *
 * @author KK
 * @since 2021-04-25
 */
// Scala batch-processing program
object WordCountScalaBatch {
  def main(args: Array[String]): Unit = {
    // Input data file
    val inputPath = "data/hello.txt"
    // Output file path
    val outputPath = "data/output"
    val environment: ExecutionEnvironment = ExecutionEnvironment.getExecutionEnvironment
    environment.setParallelism(10)
    // Read the input file as a data set of text lines
    val text: DataSet[String] = environment.readTextFile(inputPath)
    val out: AggregateDataSet[(String, Int)] = text
      .flatMap(_.split("\\s+")) // split on any run of whitespace characters
      .map((_, 1)) // map each word to a (word, 1) tuple
      .groupBy(0) // group by word (tuple field 0)
      .sum(1) // sum the counts (tuple field 1) within each group
    // Write the result; WriteMode.OVERWRITE makes re-runs idempotent instead
    // of failing with "File already exists" when the output path is present.
    out.writeAsCsv(outputPath, "\n", " ", WriteMode.OVERWRITE).setParallelism(1)
    environment.execute("scala batch process") // trigger job execution
  }
}
