package com.bigdata.spark.testapp

import com.bigdata.spark.util.MyKafkaUtil
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}

object streamingapp {

  /** Entry point: consumes the start-log Kafka topic and prints each
    * record's JSON payload to stdout in 5-second micro-batches.
    */
  def main(args: Array[String]): Unit = {
    // Local 4-core master (test/dev mode); batch interval = 5 seconds.
    val conf: SparkConf = new SparkConf().setMaster("local[4]").setAppName("app1")
    val ssc = new StreamingContext(conf, Seconds(5))

    // Kafka topic carrying the start-up log events, and the consumer group id.
    // (val, not var — neither is ever reassigned.)
    val topic: String = "gmall_start_0523"
    val groupId = "gmall_dau_0523"

    // Discretized stream of Kafka records; both key and value are Strings.
    val kafkaDStream: InputDStream[ConsumerRecord[String, String]] =
      MyKafkaUtil.getKafkaStream(topic, ssc, groupId)

    // Extract the value of each ConsumerRecord: the JSON log string that
    // was originally produced to the topic. (Removed the leftover per-record
    // debug println of value().getClass — the type is statically String.)
    val jsonDStream: DStream[String] = kafkaDStream.map(_.value())

    // Print a sample of each batch to stdout (dev-time sink).
    jsonDStream.print()

    // Start the StreamingContext and block until termination.
    ssc.start()
    ssc.awaitTermination()
  }
}
