import org.apache.flink.api.scala._
import org.apache.flink.streaming.api.scala._

object WordCountDemo {

  // NOTE(review): this object currently has NO executable entry point — both
  // `main` methods below are commented out, so running this class does nothing.
  //
  // The two commented-out blocks are near-identical batch WordCount jobs: read
  // a text file, split each line on spaces, map each word to (word, 1), group
  // by the word (tuple field 0), sum the counts (tuple field 1), and write the
  // result as CSV with parallelism 1. They differ only in the job name passed
  // to execute() ("jobName" vs "scala batch process") and trivial whitespace.
  // Consider deleting one copy, or restoring a single working main.
  //
  // NOTE(review): both versions use the legacy DataSet batch API
  // (ExecutionEnvironment / DataSet / AggregateDataSet), which is deprecated
  // in recent Flink releases in favor of the unified DataStream / Table APIs —
  // TODO confirm against the Flink version this project targets.
  //
  // NOTE(review): the file wildcard-imports both `api.scala._` (batch) and
  // `streaming.api.scala._`; if either main is uncommented, the overlapping
  // implicits from the two imports may cause ambiguity — verify before reuse.

  /*def main(args: Array[String]): Unit = {
    val inputPath = "data/wordcount.txt"
    val outputPath = "out/wordcount.txt"
    val environment: ExecutionEnvironment = ExecutionEnvironment.getExecutionEnvironment
    val text: DataSet[String] = environment.readTextFile(inputPath)
    val out: AggregateDataSet[(String, Int)] = text.flatMap(_.split(" ")).map((_, 1)).groupBy(0).sum(1)

    out.writeAsCsv(outputPath, "\n", " ").setParallelism(1)
    environment.execute("jobName")
  }*/

  /*def main(args: Array[String]): Unit = {
    val inputPath = "data/wordcount.txt"
    val outputPath = "out/wordcount.txt"
    val environment: ExecutionEnvironment = ExecutionEnvironment.getExecutionEnvironment
    val text: DataSet[String] = environment.readTextFile(inputPath)
    val out: AggregateDataSet[(String, Int)] = text.flatMap(_.split(" ")).map((_, 1)).groupBy(0).sum(1)
    out.writeAsCsv(outputPath,"\n", " ").setParallelism(1)
    environment.execute("scala batch process")
  }*/

}
