package sparkstreaming.lesson08

import kafka.serializer.StringDecoder
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
  * Created by Administrator on 2018/5/12.
  */
/**
  * Spark Streaming word-count over Kafka.
  *
  * Consumes messages from the Kafka topic "aura" using the direct (receiver-less)
  * approach, splits each message on commas, counts token occurrences per 2-second
  * batch, and prints the counts to stdout.
  */
object KafkaTest {

  def main(args: Array[String]): Unit = {
    // local[2]: at least 2 threads so the driver can both schedule batches and run tasks.
    // NOTE: appName fixed from the copy-paste typo "OutoutTest" to match this object.
    val conf = new SparkConf().setMaster("local[2]").setAppName("KafkaTest")
    val sc = new SparkContext(conf)
    val ssc = new StreamingContext(sc, Seconds(2))
    // Checkpoint directory on HDFS, required for driver fault tolerance.
    ssc.checkpoint("hdfs://hadoop1:9000/streamingkafka")

    /**
      * Data input: direct Kafka stream (no receiver, offsets managed by Spark).
      *
      * Signature for reference:
      *   def createDirectStream[K: ClassTag, V: ClassTag,
      *                          KD <: Decoder[K]: ClassTag, VD <: Decoder[V]: ClassTag](
      *     ssc: StreamingContext,
      *     kafkaParams: Map[String, String],
      *     topics: Set[String]
      *   )
      */
    val kafkaParams = Map("metadata.broker.list" -> "hadoop1:9092")
    val topics = Set("aura")
    // Each Kafka record arrives as a (key, value) pair; we only need the value payload.
    val kafkaDStream: DStream[String] = KafkaUtils
      .createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, topics)
      .map(_._2)

    // Data processing: classic word count over comma-separated tokens, printed per batch.
    kafkaDStream
      .flatMap(_.split(","))
      .map((_, 1))
      .reduceByKey(_ + _)
      .print()

    ssc.start()
    // Blocks until the streaming context is stopped (e.g. externally or by an error).
    ssc.awaitTermination()
    ssc.stop()
  }

}
