package sparkUtil

import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.json4s.DefaultFormats
import org.json4s.jackson.Json
import utils.{KafkaProducer, MyKafkaUtils}

import scala.util.Try

/**
 * @author 刘子浩
 * @date 2023/6/19 0:03
 * */
object classificationInSchool {

  /**
   * One student record parsed from a tab-separated Kafka message.
   * Field order mirrors the wire format; see [[parallelize]].
   */
  case class Student(Class: String, name: String, Gender: Int, date: String, StudentID: Int, Semester: Int, GradePoint: Int, GorN: String)

  /**
   * Streaming job: reads tab-separated student records from Kafka topic
   * "stuInfo1", counts the "L"/"A" values of the GorN field per 5-second
   * batch, and publishes the counts as JSON to Kafka topic "School".
   */
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setAppName("NetworkWordCount").setMaster("local[2]")
    val ssc = new StreamingContext(sparkConf, Seconds(5))
    ssc.sparkContext.setLogLevel("ERROR")

    // Never reassigned, so val rather than var.
    val topic = "stuInfo1"
    val groupid = "niit01"

    val origin_value = MyKafkaUtils.getKafkaDStream(ssc, topic, groupid)
    origin_value
      .map(_.value())
      // Parse each line exactly once and silently drop malformed records.
      // (The previous filter-then-flatMap parsed every line twice and
      // caught a bare Exception just to discard the record.)
      .flatMap(line => Try(parallelize(line)).toOption.map(_.GorN))
      .filter(gorN => gorN == "L" || gorN == "A")
      // countByValue already aggregates per batch (it is implemented with a
      // reduceByKey), so keys are unique per RDD here; the extra
      // reduceByKey(_ + _) the original chained afterwards was a no-op
      // that only added a shuffle.
      .countByValue()
      .map { case (gorN, count) => (gorN, count.toInt) }
      .foreachRDD { rdd =>
        // Small result set (at most two keys), so collecting to the driver
        // and serializing there is fine.
        val resultMap = rdd.collect().toMap
        val res = Json(DefaultFormats).write(resultMap)
        println(resultMap)
        KafkaProducer.send("School", res)
      }

    ssc.start()
    ssc.awaitTermination()
  }

  /**
   * Parses one tab-separated line into a [[Student]].
   *
   * Expected layout:
   * Class \t name \t Gender \t date \t StudentID \t Semester \t GradePoint \t GorN
   *
   * Throws (NumberFormatException / ArrayIndexOutOfBoundsException) on
   * malformed input; callers are expected to wrap the call — see the
   * `Try(parallelize(line))` in [[main]].
   */
  def parallelize(line: String): Student = {
    val fields = line.split("\t")
    Student(fields(0), fields(1), fields(2).toInt, fields(3),
      fields(4).toInt, fields(5).toInt, fields(6).toInt,
      fields(7))
  }

}
