package com.flink.com

import org.apache.flink.api.common.functions.{FlatMapFunction, MapFunction}
import org.apache.flink.api.java.tuple.{Tuple, Tuple1}
import org.apache.flink.api.scala.ExecutionEnvironment
import org.apache.flink.util.Collector

/**
 * One call record from a base station.
 *
 * @param sid      station id
 * @param callOut  caller number (mutable so demos can update it in place)
 * @param callInt  callee number (mutable; name kept as-is for compatibility)
 * @param callType call status, e.g. "busy"
 * @param callTime epoch-millis timestamp of the call
 * @param duration call length in the same unit the producer uses
 */
case class StationLog(
    sid: String,
    var callOut: String,
    var callInt: String,
    callType: String,
    callTime: Long,
    duration: Long)

/**
 * Small demos of Flink batch (DataSet API) operators fed from in-memory
 * collections: `map` and `flatMap`.
 */
object CollectionSource {

  def main(args: Array[String]): Unit = {

    // Streaming-source variant, kept commented out for reference:
    //    // 1. Initialise the Flink streaming environment
    //    val streamEnv: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
    //    // Change the parallelism
    //    streamEnv.setParallelism(3)
    //    // Import the implicit conversions
    //    import org.apache.flink.streaming.api.scala._
    //    val stream: DataStream[StationLog] = streamEnv.fromCollection(Array(
    //      StationLog("001", "1866", "189", "busy", System.currentTimeMillis(), 0),
    //      StationLog("002", "1866", "189", "busy", System.currentTimeMillis(), 0),
    //      StationLog("003", "1866", "189", "busy", System.currentTimeMillis(), 0),
    //      StationLog("004", "1866", "189", "busy", System.currentTimeMillis(), 0),
    //      StationLog("005", "1866", "189", "busy", System.currentTimeMillis(), 0)
    //    ))
    //
    //    stream.print()
    //    streamEnv.execute()

    // map operator demo:
    //    functionMap()
    functionFlatMap()
  }

  /**
   * Operator: map.
   *
   * Applies a user-defined `MapFunction` to every element of the data set,
   * producing a new data set whose element type may differ from the input
   * (here `Tuple1[Int]` becomes `Int`). Typical use: element-wise
   * "cleansing and conversion", e.g. changing case or incrementing a value.
   *
   * Note: `print()` on a DataSet triggers execution, so no explicit
   * `env.execute(...)` is needed.
   */
  private def functionMap(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(1)
    import org.apache.flink.api.scala._
    env.fromElements(Tuple1.of(1), Tuple1.of(5), Tuple1.of(6))
      .map(new MapFunction[Tuple1[Int], Int] {
        // Map each one-element tuple to three times its value.
        override def map(value: Tuple1[Int]): Int = value.f0 * 3
      })
      .print() // triggers job execution and prints each result element
  }

  /**
   * Operator: flatMap.
   *
   * One input element may produce zero, one or many output elements
   * (a one-to-many mapping). Here each tuple carries a space-separated
   * string that is split into one output tuple per word.
   *
   * Note: calling `print()` triggers execution automatically, so no
   * explicit `env.execute(...)` is needed.
   */
  private def functionFlatMap(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(1)
    import org.apache.flink.api.scala._
    env.fromElements(Tuple1.of("hdfs a"), Tuple1.of("hive b"), Tuple1.of("spark t"))
      .flatMap(new FlatMapFunction[Tuple1[String], Tuple1[String]]() {
        override def flatMap(value: Tuple1[String], out: Collector[Tuple1[String]]): Unit = {
          // Emit one tuple per whitespace-separated token.
          for (s <- value.f0.split(" ")) {
            out.collect(Tuple1.of(s))
          }
        }
      })
      .print()
    //  env.execute("flink flatmap operator") // not needed: print() already executes
  }

}
