package com.desheng.bigdata.flink.batch

import org.apache.flink.api.scala._
/*
    When programming the Flink DataSet or DataStream API in Scala, the first step
    must always be to import the implicit conversions:
        import org.apache.flink.api.scala._
    Otherwise the program fails to compile with:
        could not find implicit value for evidence parameter of type ...
 */
/**
 * Batch word-count example using Flink's DataSet API.
 *
 * Reads a single hard-coded sentence, splits it on whitespace, maps each
 * word to a [[WordCount]] pair, groups by the `word` field, sums the
 * `count` field, and prints the aggregated result to stdout.
 */
object ScalaFlinkBatchWordCountApp {
    def main(args: Array[String]): Unit = {
        // Batch execution entry point.
        val environment = ExecutionEnvironment.getExecutionEnvironment

        // Source: a one-element in-memory collection.
        val sentences: DataSet[String] = environment.fromCollection(List(
            "If you plan to use Apache Flink together with Apache Hadoop (run Flink on YARN, connect to HDFS, connect to HBase, or use some Hadoop-based file system connector), please check out the Hadoop Integration documentation"
        ))

        // Tokenize -> pair each word with 1 -> group on the case-class
        // field name "word" -> sum the "count" field per group.
        val wordCounts: AggregateDataSet[WordCount] = sentences
            .flatMap(_.split("\\s+"))
            .map(WordCount(_, 1))
            .groupBy("word")
            .sum("count")

        // print() triggers execution and writes results to stdout.
        wordCounts.print()
    }
}

case class WordCount(word: String, count: Int)