package com.shujia.transformaction

import org.apache.flink.api.scala._
import org.apache.flink.streaming.api.scala.{DataStream, StreamExecutionEnvironment}

object Demo1WordCount {
  def main(args: Array[String]): Unit = {

    // Entry point of the Flink job — plays the same role as a SparkContext in Spark.
    val env = StreamExecutionEnvironment.getExecutionEnvironment

    // Global parallelism for all operators in this job.
    env.setParallelism(2)

    /**
      * Alternative source: read from a socket instead of a file.
      * Start a server first on the host, e.g. on Linux: nc -lk 8888
      */
//    val lines: DataStream[String] = env.socketTextStream("node1", 8888)

    // Source: build a DataStream by reading a local text file line by line.
    val sourceStream: DataStream[String] = env.readTextFile("data/words.txt")

    // Split each comma-separated line into words and pair each word with a count of 1.
    val pairedWords: DataStream[(String, Int)] = sourceStream
      .flatMap(_.split(","))
      .map((_, 1))

    /**
      * keyBy routes every record with the same key to the same downstream task,
      * and must be followed by an aggregation-style operator.
      *
      * keyBy + reduce together behave like Spark's updateStateByKey:
      * the reduce emits a running total per key as new records arrive.
      */
    val runningCounts: DataStream[(String, Int)] = pairedWords
      .keyBy(_._1)
      .reduce((acc, next) => (acc._1, acc._2 + next._2))

    // Sink: print the running counts to stdout.
    runningCounts.print()

    // Trigger job execution; nothing runs until execute() is called.
    env.execute("wd")

  }
}
