package cn.gubanjie.edu.analysis.streaming
//192.168.1.158:9092
import cn.itcast.edu.bean.Answer
import com.google.gson.Gson
import org.apache.spark.SparkContext
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

/**
 * 实时的从kafka的edu主题消费数据,并做实时的统计分析,结果直接输出到控制台或者mysql
 */
/**
 * Streaming job: consumes JSON answer records from the Kafka topic "edu",
 * runs four real-time aggregations, and writes each result to the console.
 *
 * Fixes vs. previous revision:
 *  - All four queries are now awaited via `spark.streams.awaitAnyTermination()`;
 *    previously only the last query was awaited, so its termination/failure
 *    would tear down the other three still-running queries via `spark.stop()`.
 *  - Gson is instantiated once per partition (mapPartitions) instead of once
 *    per record.
 */
object StreamingAnalysis {
  def main(args: Array[String]): Unit = {
    // TODO 0: set up the environment
    val spark: SparkSession = SparkSession.builder().appName("sparksql").master("local[*]")
      // Keep shuffle partitions small for local testing; tune for cluster size in
      // production (Spark's default is 200).
      .config("spark.sql.shuffle.partitions", "4")
      .getOrCreate()
    val sc: SparkContext = spark.sparkContext
    sc.setLogLevel("WARN")
    import spark.implicits._
    import org.apache.spark.sql.functions._

    // TODO 1: load the stream from Kafka
    val kafkaDF: DataFrame = spark.readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", "192.168.1.158:9092")
      .option("subscribe", "edu")
      .load()
    // Kafka `value` is binary; cast to string to get one JSON document per row.
    val valueDS: Dataset[String] = kafkaDF.selectExpr("CAST(value AS STRING)").as[String]
    // Example record:
    // {"student_id":"学生ID_3","textbook_id":"教材ID_1","grade_id":"年级ID_1","subject_id":"科目ID_1_数学","chapter_id":"章节ID_chapter_2","question_id":"题目ID_32","score":5,"answer_time":"2021-11-13 16:30:20","ts":"Nov 13, 2021 4:30:20 PM"}

    // TODO 2: parse each JSON document into an Answer bean.
    // Gson is not serializable-friendly per record; create one instance per
    // partition instead of one per row.
    val answerDS: Dataset[Answer] = valueDS.mapPartitions { jsonStrs =>
      val gson = new Gson()
      jsonStrs.map(jsonStr => gson.fromJson(jsonStr, classOf[Answer]))
    }

    // TODO: real-time analytics / business logic

    // 1) Top 10 hottest questions: count per question_id, take the 10 largest.
    val result1: Dataset[Row] = answerDS.groupBy('question_id)
      .count()
      .orderBy('count.desc)
      .limit(10)

    // 2) Top 10 most active grades by answer count.
    val result2: Dataset[Row] = answerDS.groupBy('grade_id)
      .count()
      .orderBy('count.desc)
      .limit(10)

    // 3) Top 10 hottest questions, carrying the subject each belongs to.
    //    `first` is safe here because a question always maps to one subject.
    val result3: Dataset[Row] = answerDS.groupBy('question_id)
      .agg(
        first('subject_id) as "subject_id",
        count('question_id) as "count"
      )
      .orderBy('count.desc)
      .limit(10)

    // 4) Per student: the lowest score and (one of) the question(s) it came
    //    from; the 10 students with the lowest minimum scores.
    //    NOTE(review): `first('question_id)` is not guaranteed to be the row
    //    holding the minimum score — confirm whether a join on min is needed.
    val result4: Dataset[Row] = answerDS.groupBy('student_id)
      .agg(
        min('score) as "minscore",
        first('question_id) as "question_id"
      )
      .orderBy('minscore)
      .limit(10)

    // TODO 3: write each result to the console in complete mode (required for
    // aggregations without a watermark).
    result1.writeStream
      .format("console")
      .outputMode("complete")
      .start()
    result2.writeStream
      .format("console")
      .outputMode("complete")
      .start()
    result3.writeStream
      .format("console")
      .outputMode("complete")
      .start()
    result4.writeStream
      .format("console")
      .outputMode("complete")
      .start()

    // TODO 4: block until ANY query terminates. Awaiting only the last query
    // (as before) would shut down the other three as soon as it finished.
    spark.streams.awaitAnyTermination()

    // TODO 5: release resources
    spark.stop()
  }
}
