package cn.itcast.hello

import org.apache.flink.api.scala.{ExecutionEnvironment, GroupedDataSet}

/**
  * Author itcast
  * Date 2020/1/2 14:56
  * Desc Demonstrates WordCount with Flink's batch (DataSet) API:
  *      read lines, split into words, count per word, write the result to HDFS.
  */
object WordCount {
  def main(args: Array[String]): Unit = {
    // Set the HDFS user BEFORE any HDFS interaction. The original code set this
    // property only after writeAsText(); moving it first guarantees the sink is
    // created/executed with "root" permissions on HDFS.
    // If this is still insufficient on the cluster, relax permissions manually:
    //   hadoop fs -chmod -R 777 /
    System.setProperty("HADOOP_USER_NAME", "root")

    //1. Prepare the Flink batch execution environment
    val env: ExecutionEnvironment = ExecutionEnvironment.getExecutionEnvironment

    //2. Prepare the data -- Source
    // Needed for the implicit TypeInformation instances of the Scala DataSet API.
    import org.apache.flink.api.scala._
    //DataSet[line of text]: Flink's batch abstraction for a distributed data set (similar to an RDD)
    val linesDataSet: DataSet[String] = env.fromElements("The Apache Flink community released the third bugfix version of the Apache Flink 1.8 series.Running Apache Flink on Kubernetes with KUDO A common use case for Apache Flink is streaming data analytics together with Apache Kafka, which provides a pub/sub model and durability for data streams.","In this post, we demonstrate how to orchestrate Flink and Kafka with KUDO")

    //3. Transform the data -- Transformation
    //3.1 Split each line on spaces into individual words
    val wordDataSet: DataSet[String] = linesDataSet.flatMap(_.split(" "))
    //3.2 Map each word to the pair (word, 1)
    val wordAndOneDataSet: DataSet[(String, Int)] = wordDataSet.map((_,1))
    //3.3 Group by the word.
    // A KeySelector (groupBy(_._1)) cannot be used here: Flink's aggregate operators
    // ("sum") do not support KeySelector-based grouping, so we group by field index 0.
    val groupedDataSet: GroupedDataSet[(String, Int)] = wordAndOneDataSet.groupBy(0)
    //3.4 Aggregate: sum field index 1 (the counts) within each group
    val result: AggregateDataSet[(String, Int)] = groupedDataSet.sum(1)

    //4. Output the data -- Sink
    //result.print()
    // Parallelism 1 on the sink so the result is written to a single output file.
    result.setParallelism(1)
    result.writeAsText("hdfs://node01:9000/workcount/output7")

    //5. Trigger execution.
    // Required because print() is commented out: a batch job only runs when one of
    // 'execute()', 'count()', 'collect()', or 'print()' is called.
    env.execute()
  }
}
