package spark

import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.apache.spark.SparkConf
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils}
import org.apache.spark.streaming.{Durations, Seconds, StreamingContext}

import scala.collection.mutable

/**
 * Spark Streaming job: consumes tab-separated student records from the Kafka
 * topic "stuInfo" (fields: classId \t ? \t genderCode where 0 = female),
 * counts students per (classId, gender) in each 2-second batch, prints the
 * counts, and publishes them to the Kafka topic "classStu" as
 * "classId,gender,count" strings.
 */
object zhangyiting {

  def main(args: Array[String]): Unit = {
    // Spark configuration; local[*] because this is a single-machine demo job.
    val conf = new SparkConf()
      .setAppName("SparkStreamingKafkaConsumerGenderCount")
      .setMaster("local[*]")
    // StreamingContext with a 2-second micro-batch interval.
    val ssc = new StreamingContext(conf, Durations.seconds(2))
    import org.apache.spark.sql.SparkSession
    val spark = SparkSession.builder.config(conf).getOrCreate()
    spark.sparkContext.setLogLevel("WARN")

    // Kafka consumer configuration (immutable literal Map — nothing here is
    // mutated after construction, so a mutable.Map buys nothing).
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "192.168.136.128:9092",
      "key.deserializer" -> "org.apache.kafka.common.serialization.StringDeserializer",
      "value.deserializer" -> "org.apache.kafka.common.serialization.StringDeserializer",
      "group.id" -> "niit",
      "auto.offset.reset" -> "earliest"
    )

    // Kafka topics to consume.
    val topics = Array("stuInfo")

    // Direct stream of raw record values (one tab-separated line per message).
    val kafkaStream = KafkaUtils.createDirectStream[String, String](
      ssc,
      PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](topics, kafkaParams)
    ).map(_.value())

    // Parse each line into ((classId, gender), 1). Malformed messages (too
    // few fields or non-numeric values) are dropped via flatMap/Option instead
    // of throwing and killing the whole batch — the original .toInt on raw
    // Kafka payloads would crash the job on a single bad record.
    val classGenderCountStream = kafkaStream.flatMap { line =>
      val fields = line.split("\t")
      if (fields.length >= 3) {
        scala.util.Try {
          val classId = fields(0).trim.toInt
          // Gender code: 0 = female, anything else = male.
          val gender = if (fields(2).trim.toInt == 0) "female" else "male"
          ((classId, gender), 1)
        }.toOption
      } else None
    }
    val classTotalCountStream = classGenderCountStream.reduceByKey(_ + _)
    // Print per-batch (classId, gender) counts to the driver console.
    classTotalCountStream.print()

    // Kafka producer configuration (serializable java.util.HashMap so the
    // closure below can ship it to executors).
    val kafkaParamsForProduce = new java.util.HashMap[String, Object]()
    kafkaParamsForProduce.put("bootstrap.servers", "192.168.136.128:9092")
    kafkaParamsForProduce.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
    kafkaParamsForProduce.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")

    // Publish each (classId, gender, count) to the "classStu" topic.
    // One producer per PARTITION (not per record, as before): constructing a
    // KafkaProducer opens network connections and background threads, so a
    // producer per record is extremely expensive and can leak resources.
    classTotalCountStream.foreachRDD { rdd =>
      if (!rdd.isEmpty()) {
        rdd.foreachPartition { records =>
          val producer = new KafkaProducer[String, String](kafkaParamsForProduce)
          try {
            records.foreach { case ((classId, gender), count) =>
              producer.send(new ProducerRecord[String, String]("classStu", s"$classId,$gender,$count"))
            }
            // Ensure buffered sends are delivered before the producer closes.
            producer.flush()
          } finally {
            producer.close()
          }
        }
      }
    }

    // Start the streaming job and block until terminated.
    ssc.start()
    ssc.awaitTermination()
  }
}