package com.niit.bigdataPro

import java.util.HashMap
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.spark.{HashPartitioner, SparkConf, SparkContext}
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.kafka.common.serialization.StringDeserializer
import org.json4s.jackson.Serialization.write
import org.apache.spark.sql.execution.streaming.CommitMetadata.format
/**
 * @Author 张润鸣
 * @Date 2022 11 26 09 08
 **/
object KafkaToSpark {

  /**
   * Consumes tab-separated student records from the Kafka topic "stuInfo",
   * computes 2-second windowed counts with Spark Streaming, and publishes
   * each result set as JSON back to the Kafka topic "result":
   *   - totals per gender                  (record key "result2")
   *   - totals per gender and semester     (record key "result3")
   *   - totals per gender and enrollment   (record key "result4")
   *
   * Column layout assumed from the indices used below (tab-separated):
   *   index 2 -> gender ("1" = male, "0" = female)
   *   index 5 -> semester
   *   index 6 -> enrollment status (in school / left school)
   */
  def main(args: Array[String]): Unit = {

    val group = "Louise"
    val topic = "stuInfo"
    val brokers = "Louise:9092"

    // Local run using all cores; 2-second micro-batches.
    val conf = new SparkConf().setMaster("local[*]").setAppName("sparkKafka")
    val ssc = new StreamingContext(conf, Seconds(2))

    // Checkpointing is required by the window operations below.
    ssc.checkpoint("./checkpoint")
    ssc.sparkContext.setLogLevel("ERROR")

    // Consumer configuration: string key/value, manual offset handling,
    // start from the earliest available offsets for a new group.
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> brokers,
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> group,
      "auto.offset.reset" -> "earliest",
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )

    /**
     * 1. Subscribe to the source topic.
     */
    val dataStream: InputDStream[ConsumerRecord[String, String]] =
      KafkaUtils.createDirectStream[String, String](
        ssc,
        // Location strategy: distribute partitions evenly across executors.
        PreferConsistent,
        Subscribe[String, String](Array(topic), kafkaParams)
      )

    val line: DStream[String] = dataStream.map(_.value())

    /** Sums per-key counts over a 2s window sliding every 2s. */
    def windowedCount(stream: DStream[(String, Int)]): DStream[(String, Int)] =
      stream.reduceByKeyAndWindow((a: Int, b: Int) => a + b, Seconds(2), Seconds(2))

    /**
     * Publishes each non-empty batch of `stream` to the "result" topic as a
     * single JSON payload keyed by `recordKey`, printing `label` and the rows
     * for local inspection.
     *
     * foreachRDD runs on the driver, and collect() is safe here because the
     * windowed counts are tiny. A short-lived producer is created per batch
     * and always closed (the original code leaked one producer per batch).
     */
    def publishCounts(stream: DStream[(String, Int)], recordKey: String, label: String): Unit =
      stream.foreachRDD { rdd =>
        if (!rdd.isEmpty()) {
          val props = new HashMap[String, Object]()
          props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers)
          props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
          props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")

          val producer = new KafkaProducer[String, String](props)
          try {
            val counts = rdd.collect()
            println(label)
            counts.foreach(println)
            // Serialize the (key, count) pairs as JSON before sending.
            producer.send(new ProducerRecord[String, String]("result", recordKey, write(counts)))
          } finally {
            producer.close() // flush and release network/thread resources
          }
        }
      }

    /**
     * 2. Total counts per gender ("1" = male, "0" = female).
     */
    val sexDStream = line.map { x =>
      val cols = x.split("\t")
      (cols(2), 1)
    }
    // Registration order preserved from the original: female first.
    publishCounts(windowedCount(sexDStream.filter(_._1 == "0")), "result2", "女生总人数")
    publishCounts(windowedCount(sexDStream.filter(_._1 == "1")), "result2", "男生总人数")

    /**
     * 3. Counts per gender and semester, keyed "gender_semester".
     */
    val sexSemesterDStream = line.map { x =>
      val cols = x.split("\t")
      (cols(2) + "_" + cols(5), 1)
    }
    // Keys always contain "_", so startsWith("1_") matches the original
    // split("_")(0).equals("1") filter exactly. Male first, as before.
    publishCounts(windowedCount(sexSemesterDStream.filter(_._1.startsWith("1_"))), "result3", "男生各个学期总人数")
    publishCounts(windowedCount(sexSemesterDStream.filter(_._1.startsWith("0_"))), "result3", "女生各个学期总人数")

    /**
     * 4. Counts per gender and enrollment status, keyed "gender_status".
     */
    val sexStatusDStream = line.map { x =>
      val cols = x.split("\t")
      (cols(2) + "_" + cols(6), 1)
    }
    publishCounts(windowedCount(sexStatusDStream.filter(_._1.startsWith("1_"))), "result4", "男生离校在校总人数")
    publishCounts(windowedCount(sexStatusDStream.filter(_._1.startsWith("0_"))), "result4", "女生离校在校总人数")

    ssc.start()
    // Blocks until the context is stopped or fails; the unreachable
    // ssc.stop() that followed in the original was removed.
    ssc.awaitTermination()
  }
}
