import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}

/**
 * Consumes records from a Kafka topic via Spark Streaming and prints
 * (key, value) pairs parsed from each record's value.
 *
 * Each Kafka record value is expected to look like "key:value". Records
 * without a ':' separator are dropped instead of crashing the batch
 * (the previous code threw ArrayIndexOutOfBoundsException on them).
 */
object getMsg {
  def main(args: Array[String]): Unit = {
    // Inline configuration; could be externalized to a properties file later.
    val config = Map(
      "spark.cores" -> "local[*]",
      "kafka.topic" -> "raw_chrony_data"
    )

    val sparkConf = new SparkConf()
      .setMaster(config("spark.cores"))
      .setAppName("StreamingRecommender")
      .set("spark.testing.memory", "2147480000")

    // Create a SparkSession and derive a StreamingContext from its SparkContext.
    val spark = SparkSession.builder().config(sparkConf).getOrCreate()
    val sc = spark.sparkContext
    val ssc = new StreamingContext(sc, Seconds(2)) // 2-second batch duration

    // Kafka connection parameters.
    val kafkaParam = Map(
      "bootstrap.servers" -> "124.221.62.233:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "recommender",
      "auto.offset.reset" -> "earliest"
    )

    val kafkaStream = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](Array(config("kafka.topic")), kafkaParam)
    )

    // Parse "key:value" records. split(":", 2) keeps any further ':' inside
    // the value part; flatMap skips malformed records (no separator) instead
    // of throwing on splitedRows(1).
    val originStream = kafkaStream.flatMap { msg =>
      msg.value().split(":", 2) match {
        case Array(k, v) => Some((k, v))
        case _           => None
      }
    }
    originStream.print()

    // Start the streaming job and block until termination.
    ssc.start()
    println(">>>>>>>>>>>>>>> streaming started!")
    ssc.awaitTermination()
  }
}
