package com.shujia.flink.core

import org.apache.flink.streaming.api.scala._

object Demo1WordCount {

  /**
   * Streaming word count.
   *
   * Reads comma-separated words from a socket (start the source with
   * `nc -lk 8888` on host "master"), keys the stream by word, and prints
   * a running count for each word.
   */
  def main(args: Array[String]): Unit = {
    // 1. Create the Flink streaming environment (analogous to SparkContext).
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    // Flush timeout for network buffers between upstream and downstream
    // operators (default is 200 ms). Setting 0 would flush every single
    // record: lowest latency, lowest throughput.
    env.setBufferTimeout(200)

    // Parallelism plays a role similar to partition count in Spark.
    env.setParallelism(2)

    // 2. Source: one stream element per line received on the socket.
    val lineStream: DataStream[String] = env.socketTextStream("master", 8888)

    // 3. Split lines into words, map each word to (word, 1), group by the
    //    word itself, then keep a running sum over tuple field index 1.
    val wordCounts: DataStream[(String, Int)] =
      lineStream
        .flatMap(_.split(","))
        .map((_, 1))
        .keyBy(_._1)
        .sum(1)

    // Sink: emit the running counts to stdout.
    wordCounts.print()

    // The job graph is built lazily; nothing runs until execute() is called.
    env.execute()

  }
}
