package SparkStreaming

import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.kafka010.{CanCommitOffsets, HasOffsetRanges, KafkaUtils}

import java.util.HashMap

/**
 * @author Lu Ruotong
 * @date 2022/11/19 15:56
 */
object ALCountSumBoysGIRLS {

  /**
   * Consumes tab-separated student records from the Kafka topic "stuInfo" in
   * 5-second micro-batches, counts occurrences per (field6, field2) pair, and
   * publishes each aggregated tuple back to the Kafka topic "result2".
   *
   * Pipeline:
   *   1. Build a local SparkConf and a StreamingContext (5 s batch interval).
   *   2. Configure the Kafka consumer (manual offset commit, earliest reset).
   *   3. Subscribe to the topic via the direct-stream API.
   *   4. Per non-empty batch: aggregate, produce results, then commit offsets.
   *   5. Start the context and block until it is stopped.
   */
  def main(args: Array[String]): Unit = {

    /* System.setProperty("hadoop.home.dir", "D:\\spark\\hadoop-2.7.3")
     System.setProperty("HADOOP_USER_NAME", "root")*/
    val group = "niit13"
    val topic = "stuInfo"
    val conf = new SparkConf().setMaster("local[*]").setAppName("KafkaConsumer").set("spark.testing.memory", "512000000")

    // 5-second micro-batch interval.
    val ssc = new StreamingContext(conf, Seconds(5))

    // Checkpoint directory for streaming recovery metadata.
    ssc.checkpoint("./checkpoint")
    // Reduce log noise to errors only.
    ssc.sparkContext.setLogLevel("error")

    // Kafka consumer parameters.
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "niit01:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "max.poll.records" -> "1000", // cap on records fetched per poll (a count, not a time)
      "group.id" -> group,
      "auto.offset.reset" -> "earliest", // start from the beginning when no committed offset exists
      "enable.auto.commit" -> (false: java.lang.Boolean) // offsets committed manually after each batch
    )

    // Subscribe to the topic via the Kafka direct stream.
    val topicName = Array(topic)
    val streamRDD = KafkaUtils.createDirectStream[String, String](
      ssc,
      PreferConsistent, // location strategy: distribute partitions evenly across executors
      Subscribe[String, String](topicName, kafkaParams)
    )

    // Process each micro-batch read from the "stuInfo" topic.
    streamRDD.foreachRDD { kafkaRdd =>
      // Skip empty batches entirely (no aggregation, no producer, no commit).
      if (!kafkaRdd.isEmpty()) {
        // Capture this batch's offset ranges so they can be committed only
        // after the results have been produced (at-least-once semantics).
        val offsetRanges = kafkaRdd.asInstanceOf[HasOffsetRanges].offsetRanges

        // Each record value is one tab-separated line.
        // NOTE(review): field 6 is presumed to be the semester and field 2 a
        // gender code — confirm against the producer of the "stuInfo" topic.
        val counts = kafkaRdd.map(_.value())
          .map { x =>
            val fields = x.split("\t")
            (fields(6), fields(2).toInt)
          }
          .map((_, 1))
          .reduceByKey(_ + _)
          .collect() // small aggregate; safe to bring to the driver

        // Producer configuration (created per batch on the driver).
        val props = new HashMap[String, Object]()
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "niit01:9092")
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")

        val producer = new KafkaProducer[String, String](props)
        try {
          // Publish each aggregated ((field6, gender), count) tuple.
          counts.foreach { y =>
            producer.send(new ProducerRecord[String, String]("result2", y.toString))
          }
        } finally {
          // Must always close: flushes buffered sends and releases the
          // producer's network connections (previously leaked every batch).
          producer.close()
        }

        // Commit the consumed offsets only after the results were produced;
        // required because enable.auto.commit is false.
        streamRDD.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
      }
    }

    ssc.start()
    ssc.awaitTermination() // blocks until the context is stopped externally
    ssc.stop()
  }
}
