package com.shujia.flink.core

import org.apache.flink.api.common.RuntimeExecutionMode
import org.apache.flink.streaming.api.scala._

/**
 * Word count over a bounded file source, executed in BATCH runtime mode.
 *
 * Reads comma-separated words from `data/words.txt`, counts occurrences
 * per word, and prints the result to stdout.
 */
object Demo2BatchWordCount {
  def main(args: Array[String]): Unit = {
    // Streaming execution environment (entry point of the DataStream API).
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    /**
     * Runtime mode:
     * BATCH     — batch processing; only the final result is emitted.
     * STREAMING — stream processing; incremental results are emitted continuously.
     */
    env.setRuntimeMode(RuntimeExecutionMode.BATCH)

    // 1. Read a file — a bounded stream (equivalent to batch processing),
    //    then count words as one fluent transformation chain:
    //    split lines on commas, pair each word with 1, group by word,
    //    and sum the counts (field index 1 of the tuple).
    env
      .readTextFile("data/words.txt")
      .flatMap(line => line.split(","))
      .map(word => (word, 1))
      .keyBy(pair => pair._1)
      .sum(1)
      .print()

    // Submit the job; nothing runs until execute() is called.
    env.execute()
  }

}
