package org.yuanzheng.source

import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment

/**
 * @author yuanzheng
 * @date 2020/6/10-21:26
 */
/**
 * Flink streaming job that reads a text file from HDFS and prints a running
 * word count of its contents.
 *
 * Note: the HDFS address and path are hard-coded below; adjust them for the
 * target cluster before running.
 */
object HDFSFileSource {
  def main(args: Array[String]): Unit = {
    // 1. Initialize the Flink streaming execution environment.
    val environment: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
    // 2. Force the parallelism of every operator in this job to 1 so the
    //    printed output is a single, ordered stream.
    environment.setParallelism(1)
    // 3. Import the implicit TypeInformation conversions required by the
    //    Scala DataStream API (needed for flatMap/map below).
    import org.apache.flink.streaming.api.scala._
    // 4. Read the source file from HDFS as a stream of lines.
    val stream: DataStream[String] = environment.readTextFile("hdfs://192.168.1.100:9000/wordcount/yuan.log")
    // 5. Word-count: split each line on spaces, pair each word with 1,
    //    key by the word itself, and keep a running sum of the counts.
    //    keyBy(_._1) replaces the deprecated index-based keyBy(0); both key
    //    the stream by the tuple's first field, but the lambda form is
    //    type-safe and is the supported API going forward.
    val result: DataStream[(String, Int)] = stream
      .flatMap(_.split(" "))
      .map((_, 1))
      .keyBy(_._1)
      .sum(1)
    // 6. Sink: print each updated (word, count) pair to stdout.
    result.print()
    // 7. Submit the job; execute() blocks until the streaming job terminates.
    environment.execute("HDFSFileSource")
  }
}
