package com.shujia.flink.source

import org.apache.flink.api.common.RuntimeExecutionMode
import org.apache.flink.api.java.DataSet
import org.apache.flink.streaming.api.scala._

/**
 * Demonstrates running a bounded (file-based) source in BATCH execution mode.
 *
 * Word count over a comma-separated text file: each line is split into words,
 * each word is mapped to (word, 1), grouped by word, and summed.
 */
object Demo2BatchFileSource {

  def main(args: Array[String]): Unit = {
    val environment: StreamExecutionEnvironment =
      StreamExecutionEnvironment.getExecutionEnvironment

    /**
     * Choose the runtime execution mode:
     * 1. STREAMING — emits continuous results, one update per record;
     *    works for both unbounded and bounded streams.
     * 2. BATCH — emits only the final result; valid only for bounded input.
     */
    environment.setRuntimeMode(RuntimeExecutionMode.BATCH)

    // Build a DataStream from a file — a bounded stream.
    val lines: DataStream[String] = environment.readTextFile("flink/data/words.txt")

    // Classic word count: split -> pair with 1 -> key by word -> running sum.
    val words: DataStream[String] = lines.flatMap(_.split(","))
    val pairs: DataStream[(String, Int)] = words.map(word => (word, 1))
    val wordCounts: DataStream[(String, Int)] = pairs
      .keyBy(_._1)
      .sum(1)

    wordCounts.print()

    // Trigger job execution; nothing runs until this is called.
    environment.execute()
  }

}
