package com.mjf.day2

import org.apache.flink.streaming.api.functions.co.CoFlatMapFunction
import org.apache.flink.streaming.api.scala._
import org.apache.flink.util.Collector

object CoFlatMapExample {
  def main(args: Array[String]): Unit = {

    // Single-threaded environment so the printed output order is deterministic.
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(1)

    // First input stream: (name, id) pairs.
    val idStream: DataStream[(String, Int)] = env.fromElements(("hadoop", 101), ("spark", 102))

    // Second input stream: (name, port) pairs.
    val portStream: DataStream[(String, Int)] = env.fromElements(("hadoop", 8088), ("spark", 8080))

    // Connecting two raw streams by itself achieves nothing useful;
    // key both streams first so records sharing a key are routed to the same task.
    val connected: ConnectedStreams[(String, Int), (String, Int)] =
      idStream.keyBy(_._1).connect(portStream.keyBy(_._1))

    // flatMap1 receives stream-1 records, flatMap2 receives stream-2 records.
    val merged: DataStream[String] = connected.flatMap(new MyCoFlatMapFunction())

    merged.print()

    env.execute("CoFlatMapExample")

  }

  /**
   * Formats records from two connected (name, value) streams into display strings.
   *
   * `flatMap1` is invoked for each element of the first input stream,
   * `flatMap2` for each element of the second.
   */
  class MyCoFlatMapFunction extends CoFlatMapFunction[(String, Int), (String, Int), String] {
    /** Handles elements from the first stream (name, id).
      * Fix: the original collected the identical string twice, duplicating every output record. */
    override def flatMap1(in1: (String, Int), collector: Collector[String]): Unit = {
      collector.collect(in1._1 + "的IP是：" + in1._2)
    }

    /** Handles elements from the second stream (name, port). */
    override def flatMap2(in1: (String, Int), collector: Collector[String]): Unit = {
      collector.collect(in1._1 + "的端口是：" + in1._2)
    }
  }

}
