package sparkStream

import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.spark.streaming.kafka010.KafkaUtils
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.{Seconds, StreamingContext}

import java.util

object SparkKafka {
  def main(args: Array[String]): Unit = {

    /* Spark Streaming Kafka consumer pipeline:
       1) Build SparkConf (local mode, app name)
       2) Build StreamingContext with a 2-second micro-batch interval
       3) Kafka consumer config: brokers, key/value deserializers, group id, offset handling
       4) Subscribe to the source topic through the direct-stream API
       5) Windowed word-count, then forward results to another Kafka topic
       6) Start the StreamingContext and block until termination
     */

    // 1) Local master using all cores, with an app name for this streaming job.
    val conf = new SparkConf().setMaster("local[*]").setAppName("helloStream")

    // 2) Micro-batch interval of 2 seconds.
    val ssc = new StreamingContext(conf, Seconds(2))

    // Reduce console noise: only log errors (canonical uppercase level name).
    ssc.sparkContext.setLogLevel("ERROR")

    // 3) Kafka consumer configuration: brokers, deserializers, consumer group.
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "123.56.187.176:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "niit01",
      // Spark manages offsets itself; disable Kafka's auto-commit.
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )

    // 4) Subscribe to the source topic via the direct-stream API.
    val topicName = Array("stuInfo")
    val streamRdd = KafkaUtils.createDirectStream[String, String](
      ssc,
      PreferConsistent,
      Subscribe[String, String](topicName, kafkaParams)
    )

    // Producer configuration for forwarding results to topic "test1".
    // Kept as a java.util.HashMap (serializable) so Spark can ship it to executors.
    val property = new util.HashMap[String, Object]()
    property.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.80.136:9092")
    property.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
    property.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")

    // 5) Kafka records are (key, value) pairs; the payload of interest is the value.
    val res = streamRdd.map(_.value())
    // Split each message on "///", count tokens over a 4-second window sliding every 4 seconds.
    val result = res
      .flatMap(_.split("///"))
      .map((_, 1))
      .reduceByKeyAndWindow(_ + _, Seconds(4), Seconds(4))

    result.foreachRDD { rdd =>
      println("--------数据是-------")
      // FIX: create ONE producer per partition instead of one per record.
      // KafkaProducer construction is expensive (opens connections, allocates
      // buffers); the previous per-record create/close pattern throttled
      // throughput and risked leaking sockets if send() threw before close().
      rdd.foreachPartition { partition =>
        if (partition.nonEmpty) {
          val producer = new KafkaProducer[String, String](property)
          try {
            partition.foreach { obj =>
              println(obj) // print each (word, count) pair for inspection
              producer.send(new ProducerRecord[String, String]("test1", obj.toString))
            }
          } finally {
            // close() flushes any buffered records before releasing resources,
            // and runs even if a send above threw.
            producer.close()
          }
        }
      }
    }

    // 6) Start the streaming job and block the main thread until it is stopped.
    ssc.start()
    ssc.awaitTermination()
  }

}