package com.stone.qixiangju

import org.apache.flink.api.scala._

/**
 * Batch word-count job (Flink DataSet API).
 *
 * Reads a text file — the path is chosen by operating system: a Windows
 * development path, or `/opt/flinkdata/words.txt` when running on Linux —
 * splits each line on single spaces, and prints one "word count" pair per
 * distinct word. The input is a bounded DataSet, so the job finishes on
 * its own once `print()` has executed.
 */
object WordCount {
  def main(args: Array[String]): Unit = {
    // Batch execution environment.
    val env = ExecutionEnvironment.getExecutionEnvironment

    // Read the OS name once; it drives the input-path selection below.
    val osName = System.getProperty("os.name")
    println(osName)

    // val bound to an if-expression instead of a reassigned var.
    val inputPath =
      if (osName.contains("Linux")) "/opt/flinkdata/words.txt"
      else "D:\\spark\\workspace\\cq_bigdata\\data\\words.txt"

    val inputDataSet = env.readTextFile(inputPath)

    // Tokenize on single spaces (same as the original), then count per word.
    val wordcountSet = inputDataSet
      .flatMap(line => line.split(" "))
      .map((_, 1))
      .groupBy(0) // group by tuple field 0: the word
      .sum(1)     // sum tuple field 1: the per-word count

    // print() triggers execution of a bounded DataSet program;
    // no explicit env.execute() is needed afterwards.
    wordcountSet.map(x => s"${x._1} ${x._2}").print()
  }

}

/**
 * Example submission commands:
 *
 * Submit to a YARN cluster (note: this example targets a different main
 * class, com.lecosa.flink.ReduceTest, and is kept only as a template):
 *   flink run  -m yarn-cluster -c com.lecosa.flink.ReduceTest  /home/flink-cdc-1.0.jar
 *
 * Submit this WordCount job:
 *   flink run   -c com.stone.qixiangju.WordCount   cq_flink_cdc.jar
 */