package cn.tedu.batch

import org.apache.flink.api.scala.{DataSet, ExecutionEnvironment}
import org.apache.flink.core.fs.FileSystem.WriteMode

/**
 * @author Amos
 * @date 2022/5/19
 */

object BatchWordCount {

  /**
   * Batch word count over a whitespace-separated text file.
   *
   * @param args optional program arguments:
   *             args(0) = input path  (default: hdfs://hadoop01:8020/test/input/wordcount.txt)
   *             args(1) = output path (default: hdfs://hadoop01:8020/test/output/003)
   *             Invoking with no arguments preserves the original hard-coded behavior.
   */
  def main(args: Array[String]): Unit = {

    // Paths are overridable via program arguments; `lift` avoids an
    // IndexOutOfBoundsException when fewer arguments are supplied.
    val inputPath  = args.lift(0).getOrElse("hdfs://hadoop01:8020/test/input/wordcount.txt")
    val outputPath = args.lift(1).getOrElse("hdfs://hadoop01:8020/test/output/003")

    // 1. Create the batch execution environment.
    val env = ExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(6)

    // 2. Create the data source from the input text file.
    val source: DataSet[String] = env.readTextFile(inputPath)

    // 3. Transform the data.
    // Implicit TypeInformation conversions required by the Scala DataSet API.
    import org.apache.flink.api.scala._
    // Split each line on single spaces to obtain individual words.
    val words: DataSet[String] = source.flatMap(_.split(" "))
    // Pair each word with an initial count of 1.
    val wordAndOne: DataSet[(String, Int)] = words.map((_, 1))

    // Group by the word (tuple field 0) and sum the counts (tuple field 1).
    val result: AggregateDataSet[(String, Int)] = wordAndOne.groupBy(0).sum(1)

    // 4. Sink: write the result, overwriting any existing output directory.
    result.writeAsText(outputPath, WriteMode.OVERWRITE)

    // Flink operators are lazily evaluated; nothing runs until env.execute()
    // submits the job.
    env.execute()
  }

}
