import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.serialization.{StringDeserializer, StringSerializer}
import org.apache.spark.SparkConf
import org.apache.spark.sql.{SaveMode, SparkSession}
import org.apache.spark.sql.functions.{sum, when}
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}

import java.util.Properties
import org.json4s.DefaultFormats
import org.json4s.jackson.Serialization

object SparkStream {

  /**
   * Entry point: consumes tab-separated student records from the Kafka topic
   * "Project", computes several statistics over 2-second windows, publishes
   * each result as a JSON message to the Kafka topic "test3", and persists
   * the per-class gender aggregation to MySQL.
   *
   * Record layout (7 tab-separated fields, inferred from usage — confirm
   * against the producer):
   *   0: class name
   *   2: gender ("1" = male, "2" = female)
   *   5: semester
   *   6: enrollment status ("A" = enrolled, "L" = left)
   */
  def main(args: Array[String]): Unit = {
    // Spark configuration: local mode using every core, with back-pressure
    // enabled so Kafka consumption cannot outrun the processing rate.
    val conf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("helloStream")
      .set("spark.executor.memory", "2g")
      .set("spark.driver.memory", "2g")
      .set("spark.executor.cores", "4")
      .set("spark.streaming.backpressure.enabled", "true")
      .set("spark.streaming.backpressure.initialRate", "1000")

    // Build the SparkSession first and derive the StreamingContext from its
    // SparkContext so exactly one context exists. (The original created the
    // StreamingContext first and relied on getOrCreate silently reusing the
    // context it had made — fragile ordering.)
    val spark = SparkSession.builder.config(conf).getOrCreate()
    val ssc = new StreamingContext(spark.sparkContext, Seconds(2))

    // Implicit conversions for DataFrame/Dataset operations ($"col", toDF).
    import spark.implicits._

    // Canonical (upper-case) log-level name.
    ssc.sparkContext.setLogLevel("ERROR")

    // Kafka consumer configuration.
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "192.168.235.128:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "niit",
      "auto.offset.reset" -> "latest",
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )

    // Kafka producer configuration.
    val producerProps = new Properties()
    producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.235.128:9092")
    producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer])
    producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer])

    // One producer shared by all foreachRDD bodies. They run on the driver
    // (after collect()), so sharing a single instance here is safe.
    val producer = new KafkaProducer[String, String](producerProps)

    // Subscribe to the input topic.
    val topics = Array("Project")
    val stream = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](topics, kafkaParams)
    )

    // Split each record on tabs and keep only well-formed 7-field rows.
    val dataStream = stream.map(_.value()).map(_.split("\t")).filter(_.length == 7)

    // JSON serialization support for json4s.
    implicit val formats = DefaultFormats

    /**
     * Serializes `{introduction, result}` to JSON and sends it to `topic`.
     * KafkaProducer.send is already asynchronous — it only enqueues the
     * record into the producer's internal buffer — so no extra thread per
     * message is needed (the original spawned one, risking unbounded thread
     * creation under load).
     */
    def sendResultToKafka(topic: String, introduction: String, result: String): Unit = {
      val message = Map("introduction" -> introduction, "result" -> result)
      val jsonMessage = Serialization.write(message)
      producer.send(new ProducerRecord[String, String](topic, jsonMessage))
    }

    // 1) Total male/female counts per 2-second window.
    val genderCounts = dataStream.map(parts => (parts(2), 1)).reduceByKeyAndWindow((a: Int, b: Int) => a + b, Seconds(2), Seconds(2))
    genderCounts.foreachRDD { rdd =>
      // Collect into a Map so counts stay Int (RDD.sum returns Double and
      // rendered the counts as "12.0" in the original message).
      val counts = rdd.collect().toMap
      val maleCount = counts.getOrElse("1", 0)
      val femaleCount = counts.getOrElse("2", 0)
      val totalCount = maleCount + femaleCount
      val result = s"Male: $maleCount, Female: $femaleCount, Total: $totalCount"
      sendResultToKafka("test3", "统计所有男女人数各自的总和:", result)
    }

    // 2) Male/female counts per semester, per 2-second window.
    val semesterGenderCounts = dataStream.map(parts => ((parts(5), parts(2)), 1)).reduceByKeyAndWindow((a: Int, b: Int) => a + b, Seconds(2), Seconds(2))
    semesterGenderCounts.foreachRDD { rdd =>
      val counts = rdd.collect().toMap
      val result = counts.map { case ((semester, gender), count) => s"Semester: $semester, Gender: $gender, Count: $count" }.mkString("; ")
      sendResultToKafka("test3", "统计各个学期男女人数总和:", result)
    }

    // 3) Enrolled ("A") vs. not-enrolled ("L") male/female counts, per window.
    val statusGenderCounts = dataStream.map(parts => ((parts(6), parts(2)), 1)).reduceByKeyAndWindow((a: Int, b: Int) => a + b, Seconds(2), Seconds(2))
    statusGenderCounts.foreachRDD { rdd =>
      val counts = rdd.collect().toMap
      val inSchoolMaleCount = counts.getOrElse(("A", "1"), 0)
      val inSchoolFemaleCount = counts.getOrElse(("A", "2"), 0)
      val notInSchoolMaleCount = counts.getOrElse(("L", "1"), 0)
      val notInSchoolFemaleCount = counts.getOrElse(("L", "2"), 0)
      val result = s"InSchool - Male: $inSchoolMaleCount, Female: $inSchoolFemaleCount; NotInSchool - Male: $notInSchoolMaleCount, Female: $notInSchoolFemaleCount"
      sendResultToKafka("test3", "统计在籍和不在籍男女生人数:", result)
    }

    // 4) Male/female counts per class; written to MySQL and sent to Kafka.
    val classGenderCounts = dataStream.map { parts =>
      val className = parts(0)
      val gender = if (parts(2) == "1") "male" else "female"
      ((className, gender), 1)
    }.reduceByKey(_ + _)

    // MySQL connection properties for the per-class aggregation sink.
    val jdbcUrl = "jdbc:mysql://localhost:3306/student_stats"
    val connectionProperties = new Properties()
    connectionProperties.put("user", "root")
    connectionProperties.put("password", "123456")
    connectionProperties.put("driver", "com.mysql.cj.jdbc.Driver")

    classGenderCounts.foreachRDD { rdd =>
      if (!rdd.isEmpty()) {
        // Convert RDD[((class, gender), count)] to a DataFrame; the tuple key
        // becomes a struct column whose fields are _1 and _2.
        val classGenderDF = rdd.toDF("class_gender", "count")
        classGenderDF.createOrReplaceTempView("class_gender_view")

        // Flatten the struct key into named columns via Spark SQL.
        val resultDF = spark.sql(
          """
      SELECT class_gender._1 AS className, class_gender._2 AS gender, count
      FROM class_gender_view
    """)

        // Pivot gender into male_count / female_count per class.
        // (spark.implicits._ is already in scope from the top of main; the
        // original re-imported it redundantly here.)
        val aggregatedDF = resultDF.groupBy("className")
          .agg(
            sum(when($"gender" === "male", $"count").otherwise(0)).alias("male_count"),
            sum(when($"gender" === "female", $"count").otherwise(0)).alias("female_count")
          )

        // Append the aggregation to MySQL.
        aggregatedDF.write.mode(SaveMode.Append).jdbc(jdbcUrl, "class_gender_counts", connectionProperties)

        // Send the flattened per-class rows to Kafka.
        val results = resultDF.collect()
        val resultString = results.map { row =>
          val className = row.getString(0)
          val gender = row.getString(1)
          // count came from reduceByKey over Int, so it is IntegerType.
          val count = row.getInt(2).toLong
          s"Class: $className, Gender: $gender, Count: $count"
        }.mkString("; ")

        sendResultToKafka("test3", "统计各个班级的男女的人数:", resultString)
      }
    }

    // 5) Male/female counts per (semester, class), via plain RDD operations.
    val semesterClassGenderCounts = dataStream.map { parts =>
      val semester = parts(5)
      val className = parts(0)
      val gender = if (parts(2) == "1") "male" else "female"
      ((semester, className, gender), 1)
    }.reduceByKey((a, b) => a + b)

    semesterClassGenderCounts.foreachRDD { rdd =>
      val results = rdd.collect()
      val resultString = results.map { case ((semester, className, gender), count) =>
        s"Semester: $semester, Class: $className, Gender: $gender, Count: $count"
      }.mkString("; ")
      sendResultToKafka("test3", "统计不同学期各个班级的男女的人数:", resultString)
    }

    // Start streaming and block until termination; close the producer on the
    // way out so any buffered messages are flushed (the original leaked it).
    ssc.start()
    try {
      ssc.awaitTermination()
    } finally {
      producer.close()
    }
  }
}