package main.scala.demo.kafka

import kafka.serializer.StringDecoder
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkConf
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
  * Kafka + Spark Streaming demos: a receiver-based consumer
  * ([[MyKafkaStreamReceiversDemo]]) and a direct-stream consumer
  * ([[MyKafkaStreamDemo]]).
  *
  * @author zhangyimin
  * @date 2018-10-18 15:38
  * @version 1.0
  */
object MyKafkaStreamReceiversDemo {

  /**
    * Creates one receiver-based Kafka input stream. Each call allocates a
    * dedicated receiver, so invoking this several times and unioning the
    * results raises ingest parallelism.
    *
    * @param ssc     the streaming context to attach the receiver to
    * @param zkQuorum ZooKeeper connect string (receiver API reads offsets from ZK)
    * @param groupId  Kafka consumer group id
    * @param topics   topic name -> number of consumer threads for that topic
    */
  private def createReceiver(ssc: StreamingContext,
                             zkQuorum: String,
                             groupId: String,
                             topics: Map[String, Int]) =
    KafkaUtils.createStream(ssc, zkQuorum, groupId, topics, StorageLevel.MEMORY_ONLY_SER_2)

  def main(args: Array[String]): Unit = {
    // Silence framework logging so the demo's own output stands out.
    Logger.getLogger("org.apache.spark").setLevel(Level.ERROR)
    Logger.getLogger("org.eclipse.jetty.server").setLevel(Level.OFF)
    // local[2]: receiver-based streams need at least one core for the
    // receiver itself plus one for processing.
    val sparkConf = new SparkConf().setAppName("MyNetworkWordCount").setMaster("local[2]")
    val streamingContext = new StreamingContext(sparkConf, Seconds(3))
    // Topic name -> number of consumer threads reading that topic.
    val topic = Map("mydemo1" -> 1)
    val zkQuorum = "10.16.7.36:2181"
    val groupId = "myGroup"

    // Single-receiver stream.
    val kafkaStream = createReceiver(streamingContext, zkQuorum, groupId, topic)

    // ===== Parallel receiving =====
    // Five independent receivers unioned into one DStream for higher throughput.
    val kafkaStreams = (1 to 5).map(_ => createReceiver(streamingContext, zkQuorum, groupId, topic))
    val unionStreams = streamingContext.union(kafkaStreams)

    // Each record is a (key, value) pair; keep the message value.
    // (The original `new String(x.toString())` made a needless String copy
    // and printed the whole tuple, e.g. "(null,msg)", instead of the message.)
    val lineStream1 = unionStreams.map(_._2)
    lineStream1.print()
    // ===== Parallel receiving =====

    val lineStream = kafkaStream.map(_._2)
    lineStream.print()

    streamingContext.start()
    streamingContext.awaitTermination()
  }

}


object MyKafkaStreamDemo {
  def main(args: Array[String]): Unit = {
    // Silence framework logging so the demo's own output stands out.
    Logger.getLogger("org.apache.spark").setLevel(Level.ERROR)
    Logger.getLogger("org.eclipse.jetty.server").setLevel(Level.OFF)
    // local[2]: run with 2 local cores.
    val sparkConf = new SparkConf().setAppName("MyNetworkWordCount").setMaster("local[2]")
    val streamingContext = new StreamingContext(sparkConf, Seconds(3))
    // Topics to subscribe to (the direct API takes a Set, not a receiver map).
    val topic = Set("mydemo1")
    // Kafka connection properties: the direct stream talks to the brokers
    // directly (host:port), not to ZooKeeper.
    val kafkaProps = Map[String, String]("metadata.broker.list" -> "10.16.7.36:9092")

    // Receiver-less direct stream; StringDecoder deserializes both the
    // message key and the message value.
    val kafkaStream = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](
      streamingContext,
      kafkaProps,
      topic)

    // Each record is a (key, value) pair; keep the message value.
    // (The original `new String(x.toString())` made a needless String copy
    // and printed the whole tuple, e.g. "(null,msg)", instead of the message.)
    val lineStream = kafkaStream.map(_._2)

    lineStream.print()

    streamingContext.start()
    streamingContext.awaitTermination()
  }

}

