package com.atguigu.sparkstreaming.demos

import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.spark.streaming.kafka010.KafkaUtils
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
 * Created by Smexy on 2022/7/15
 *
 *    消费多个主题，将多个主题的数据进行关联。
 *
 *    每个主题一个流
 *
 *    只有 DS[K,v]才能关联。
 *
 *    -------------------------------
 *      select
 *
 *      from t1 join t2 on t1.id = t2.id
 *
 *      在算子中，需要把关联的字段作为 K才行。
 *
 *
 *    DS[K,v1]  join DS[K,v2]  =  DS[K,(v1,v2)]
 *
 *    --------------------------------
 *      两个流，只有同一批次的数据，才能关联。
 *
 *      Option: some 有
 *              none 没有
 *
 *    ---------------------------------
 *      常见的错误：
 * Exception in thread "main" org.apache.spark.SparkException:
 *      Only one SparkContext should be running in this JVM (see SPARK-2243).
 *      The currently running SparkContext was created at:
 *
 *      一个JVM只能有一个SparkContext对象
 *
 * Exception in thread "main" java.lang.IllegalArgumentException:
 *      requirement failed: Some of the DStreams have different contexts
 *
 *      只有同一个StreamingContext创建出的DStream才能Join.
 *
 */
object JoinDemo {

  // The two Kafka topics to consume; each topic is read as its own DStream
  // and the two streams are joined on their record values below.
  val topic1 = "topicA"
  val topic2 = "topicB"

  /**
   * Entry point: subscribes to two Kafka topics, keys each stream by record
   * value, full-outer-joins them per batch, and prints the result.
   *
   * Both input streams MUST be created from the same StreamingContext:
   * only one SparkContext may run per JVM (SPARK-2243), and DStreams from
   * different contexts cannot be joined (IllegalArgumentException:
   * "Some of the DStreams have different contexts").
   */
  def main(args: Array[String]): Unit = {

    // Fixed: appName was "TransformDemo" (copy-paste leftover from another
    // demo); it should match this object so the Spark UI labels it correctly.
    val streamingContext1 = new StreamingContext("local[*]", "JoinDemo", Seconds(10))
    // Creating a second StreamingContext in the same JVM would fail with
    // "Only one SparkContext should be running in this JVM" — kept as a reminder:
    //val streamingContext2 = new StreamingContext(streamingContext1.sparkContext, Seconds(5))

    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "hadoop102:9092,hadoop103:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "2203091",
      "auto.offset.reset" -> "latest",
      // NOTE(review): auto-committing offsets can lose or duplicate records on
      // failure; acceptable for a demo, but confirm before reusing in production.
      "enable.auto.commit" -> "true"
    )

    // One direct stream per topic; both share streamingContext1 so they can be joined.
    val ds1: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](
      streamingContext1,
      PreferConsistent,
      Subscribe[String, String](Array(topic1), kafkaParams)
    )

    val ds2: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](
      streamingContext1,
      PreferConsistent,
      Subscribe[String, String](Array(topic2), kafkaParams)
    )

    // Joins require keyed DStreams (DS[K, V]); the join field (here: the
    // record value) must become the key.
    val ds3: DStream[(String, Int)] = ds1.map(record => (record.value(), 1))
    val ds4: DStream[(String, String)] = ds2.map(record => (record.value(), "2"))

    // Full outer join: DS[K, V1] join DS[K, V2] => DS[K, (Option[V1], Option[V2])].
    // Only records arriving in the SAME batch on both streams can match;
    // Some = value present on that side in this batch, None = absent.
    val ds5: DStream[(String, (Option[Int], Option[String]))] = ds3.fullOuterJoin(ds4)

    ds5.print(1000)

    // Start the streaming application.
    streamingContext1.start()

    // Block the main thread so the app keeps running until terminated.
    streamingContext1.awaitTermination()

  }

}
