package com.mjf.transformation

import org.apache.flink.api.common.functions.FlatMapFunction
import org.apache.flink.streaming.api.scala._
import org.apache.flink.util.Collector

/**
 * Demonstrates Flink's `flatMap` transformation on a stream of sentences,
 * comparing two equivalent ways of supplying the mapping logic:
 * an inline lambda and a dedicated `FlatMapFunction` implementation.
 */
object FlatMapDemo {
  def main(args: Array[String]): Unit = {

    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
    // Single task slot so the printed output keeps a deterministic order.
    env.setParallelism(1)

    // Bounded demo input: a handful of short sentences.
    val sentences: DataStream[String] = env.fromCollection(Seq(
      "hello world",
      "hello java",
      "hello scala",
      "hello hadoop",
      "spark  flink"
    ))

    sentences.print("source")

    // Approach 1: flatMap with an inline function (split on non-word runs).
    val viaLambda: DataStream[String] = sentences.flatMap(_.split("\\W+"))

    // Approach 2: flatMap with a user-defined FlatMapFunction class.
    val viaClass: DataStream[String] = sentences.flatMap(new MyFlatMapFunction)

    viaLambda.print("函数")
    viaClass.print("自定义函数类")

    // Lazily-built pipeline only runs once execute() is called.
    env.execute(FlatMapDemo.getClass.getName)

  }
}

/**
 * Splits each input line on runs of non-word characters and emits one record
 * per extracted token, prefixed with the original line for traceability
 * (format: `<original line>---><token>`).
 */
class MyFlatMapFunction extends FlatMapFunction[String, String] {
  override def flatMap(value: String, out: Collector[String]): Unit =
    value.split("\\W+").foreach { token =>
      out.collect(s"$value--->$token")
    }
}