package SparkStreaming

import java.util.HashMap

import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.kafka010.{CanCommitOffsets, HasOffsetRanges, KafkaUtils}

/**
  * @author Lu Ruotong
  * @date 2022/12/5 10:54
  */
object SemesterCountBoysGirls {

  /**
    * Streaming job: every 2 seconds, counts students per (semester, gender)
    * from the Kafka topic `stuInfo` and publishes each aggregated pair to the
    * Kafka topic `result_semester`.
    *
    * Records are tab-separated lines; column 5 is the semester, column 2 the
    * gender. NOTE(review): assumes every line has at least 6 fields and that
    * column 5 parses as an integer — confirm against the upstream producer.
    */
  def main(args: Array[String]): Unit = {
    // Consumer group and source topic.
    val group = "niit12"
    val topic = "stuInfo"

    // Local Spark configuration with 2-second micro-batches.
    val conf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("CountSemesterSum")
      .set("spark.testing.memory", "512000000")
    val ssc = new StreamingContext(conf, Seconds(2))

    // Checkpoint directory and quieter logging, as in the original job.
    ssc.checkpoint("./checkpoint")
    ssc.sparkContext.setLogLevel("error")

    // Kafka consumer parameters.
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "niit01:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "max.poll.records" -> "1000", // cap on records fetched per poll (a count, not a time interval)
      "group.id" -> group,
      "auto.offset.reset" -> "earliest", // start from the beginning when no committed offset exists
      "enable.auto.commit" -> (false: java.lang.Boolean) // offsets are committed manually after each batch
    )

    val topics = Array(topic)
    val stream = KafkaUtils.createDirectStream[String, String](
      ssc,
      PreferConsistent, // location strategy
      Subscribe[String, String](topics, kafkaParams)
    )

    stream.foreachRDD { rdd =>
      if (!rdd.isEmpty()) {
        // Capture the batch's offset ranges up front so they can be committed
        // once processing succeeds (at-least-once semantics).
        val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

        // ((semester, gender), count) pairs. The result set is tiny — at most
        // one entry per (semester, gender) combination — so collect() on the
        // driver is safe.
        val counts = rdd.map(_.value())
          .map { line =>
            val fields = line.split("\t")
            (fields(5).toInt, fields(2))
          }
          .map((_, 1))
          .reduceByKey(_ + _)
          .collect()

        // Producer for the aggregated results. It must be closed after use:
        // the original code leaked one producer (sockets + I/O thread) per batch.
        val props = new HashMap[String, Object]()
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "niit01:9092")
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
        val producer = new KafkaProducer[String, String](props)
        try {
          counts.foreach { entry =>
            producer.send(new ProducerRecord[String, String]("result_semester", entry.toString))
          }
        } finally {
          producer.close() // flushes buffered sends, then releases resources
        }

        // Auto-commit is disabled above; without this explicit commit the
        // group's position was never stored, so every restart replayed the
        // whole topic from "earliest".
        stream.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
      }
    }

    ssc.start()
    // Blocks until the context is stopped externally; an explicit stop()
    // after this call is unreachable in normal operation.
    ssc.awaitTermination()
  }
}

