package com.example

import org.apache.flink.api.java.utils.ParameterTool
import org.apache.flink.api.scala.ExecutionEnvironment
import org.apache.flink.core.fs.FileSystem
// 隐式转换
import org.apache.flink.api.scala._

/** Flink batch WordCount example.
  *
  * Reads no external input (uses a hard-coded element collection), counts
  * word occurrences, and writes the result as text to an output path.
  * Run with: flink run -c com.example.WordCountOne day01-1.0-SNAPSHOT.jar --output /export/server/bb.txt
  */
object WordCountOne {
  def main(args: Array[String]): Unit = {
    // Parse command-line arguments (e.g. --output <path>).
    val params = ParameterTool.fromArgs(args)
    // Output path: use --output if given, otherwise a timestamped HDFS directory.
    // (val + expression instead of var + reassignment.)
    val output: String =
      if (params.has("output")) params.get("output")
      else "hdfs://node1:8020/wordcount/output_" + System.currentTimeMillis
    // Execution environment: the entry point object for batch programs.
    val env = ExecutionEnvironment.getExecutionEnvironment
    // Load / create the initial data set.
    val sourceDs: DataSet[String] = env.fromElements("itcast hadoop spark", "itcast hadoop spark", "itcast hadoop", "itcast")

    // Split each line into individual words.
    val wordsDs: DataSet[String] = sourceDs.flatMap(_.split(" "))

    // Pair each word with an initial count of 1.
    val wordAndOneDs: DataSet[(String, Int)] = wordsDs.map((_, 1))
    // Group by the word (tuple field 0).
    val groupDs: GroupedDataSet[(String, Int)] = wordAndOneDs.groupBy(0)
    // Aggregate: sum the counts (tuple field 1) within each group.
    val aggDs: AggregateDataSet[(String, Int)] = groupDs.sum(1)
    // OVERWRITE mode; sink parallelism 1 so a single output file is produced.
    // (Default parallelism here is the CPU core count, e.g. 8; a global value
    // could be set via env.setParallelism(1).)
    aggDs.writeAsText(output, FileSystem.WriteMode.OVERWRITE).setParallelism(1)
    //    aggDs.writeAsText("/export/server/aa.txt", FileSystem.WriteMode.OVERWRITE).setParallelism(1)
    //    aggDs.writeAsText("E:\\note\\flink-project\\temp\\aa.txt", FileSystem.WriteMode.OVERWRITE).setParallelism(1)
    // Expected output:
    //(hadoop,3)
    //(itcast,4)
    //(spark,2)
    val parallelism = env.getParallelism
    println("zjy---------")
    println(parallelism) // 8 — the default parallelism is 8 on this machine

    // Trigger execution (write sinks only run after execute()).
    env.execute()
//    flink run -c com.example.WordCountOne day01-1.0-SNAPSHOT.jar --output /export/server/bb.txt

  }
}
