package com.bd08.flink.demo

import org.apache.flink.api.common.functions.RichFlatMapFunction
import org.apache.flink.api.scala.createTypeInformation
import org.apache.flink.configuration.Configuration
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.util.Collector

object WordCount {

  /**
   * Flink streaming word-count demo.
   *
   * NOTE: uses an explicit `main` instead of `extends App` — the `App`
   * trait relies on `DelayedInit`, so object fields may still be
   * uninitialized when Flink serializes the job graph and ships it to a
   * cluster, leading to NullPointerExceptions. Flink's documentation
   * explicitly recommends a plain `main` method for program entry points.
   */
  def main(args: Array[String]): Unit = {
    // 1. Create the execution environment: local when run directly from
    //    the IDE, the cluster environment (started via start-cluster.sh)
    //    when submitted with `flink run xx.jar`.
    val env = StreamExecutionEnvironment.getExecutionEnvironment

    // 2. Build the source DataStream from an in-memory collection.
    val ds = env.fromCollection(Seq("hadoop flink", "spark kafka", "flink kafka", "flume hadoop"))

    // 3. Transform: split each line on whitespace, pair every word with a
    //    count of 1, key by the word, and sum the counts per key.
    val count = ds.flatMap(line => line.split("\\s+")).map((_, 1)).keyBy(_._1).sum(1)
    //val count = ds.flatMap(new MyFlatMap).keyBy(_._1).sum(1)

    // 4. Emit the running counts (equivalent to count.print()).
    //count.print()
    count.addSink(rs => println(rs))

    // 5. Submit and execute the job.
    env.execute("word count")
  }

  // Alternative RichFlatMapFunction implementation, kept for reference.
//  class MyFlatMap extends RichFlatMapFunction[String,(String,Int)]{
//    override def open(parameters: Configuration): Unit = {
//      println("open............")
//    }
//    override def flatMap(line: String, collector: Collector[(String, Int)]): Unit = {
//      line.split("\\s+").foreach(word =>{
//        collector.collect((word,1))
//      })
//    }
//
//    override def close(): Unit = {
//      println("close.............")
//    }
//  }

}
