package com.mjf.day2

import org.apache.flink.streaming.api.functions.co.CoMapFunction
import org.apache.flink.streaming.api.scala._

/**
 * Connect example.
 *
 * `connect` is similar to `union` in that it merges two streams, but with
 * two key differences: `union` accepts two or more streams and requires all
 * of them to share the same element type, whereas `connect` joins exactly
 * two streams whose element types may differ, producing a
 * [[ConnectedStreams]] processed by a co-function (`map1` / `map2`).
 */
object CoMapExample {
  def main(args: Array[String]): Unit = {

    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
    // Parallelism 1 keeps the printed output in a single, readable sequence.
    env.setParallelism(1)

    // First stream: (service name, IP-like id).
    val stream1: DataStream[(String, Int)] = env.fromElements(
      ("hadoop", 101),
      ("spark", 102)
    )

    // Second stream: (service name, port).
    val stream2: DataStream[(String, Int)] = env.fromElements(
      ("hadoop", 8088),
      ("spark", 8080)
    )

    // Connecting two raw streams is rarely useful on its own:
    // key both streams by the same field (the service name) so that elements
    // with the same key are routed to the same parallel instance of the
    // co-function and can be correlated there.
    val connected: ConnectedStreams[(String, Int), (String, Int)] = stream1
      .keyBy(_._1)
      .connect(stream2.keyBy(_._1))

    // CoMap applies map1 to elements of stream1 and map2 to elements of
    // stream2, emitting a single unified output stream.
    val res: DataStream[String] = connected.map(new MyCoMapFunction())

    res.print()

    env.execute("CoMapExample")

  }

  /**
   * Co-map over the two connected `(name, value)` streams, formatting each
   * element as a descriptive string. `map1` handles elements from the first
   * stream, `map2` those from the second.
   */
  class MyCoMapFunction extends CoMapFunction[(String, Int), (String, Int), String] {
    // Invoked for each element of the FIRST connected stream.
    override def map1(in1: (String, Int)): String =
      s"${in1._1}的IP是：${in1._2}"

    // Invoked for each element of the SECOND connected stream.
    // Parameter renamed in1 -> in2: the old name wrongly implied the element
    // came from the first stream.
    override def map2(in2: (String, Int)): String =
      s"${in2._1}的端口是：${in2._2}"
  }
}
