package org.niit.service

import org.apache.spark.sql.{DataFrame, Dataset, Row}
import org.niit.bean.AnswerWithRecommendations
import org.niit.util.SparkUtil

/**
 * Date: 2025/6/12
 * Author: Ys
 * Description: Batch analytics over the MySQL `edu` answer table — computes
 * per-subject hot-question statistics and per-subject recommended-question counts.
 */
class EDUBatchService {
  import org.apache.spark.sql.functions._

  /**
   * Entry point for the batch job: loads the `edu` answers table over JDBC
   * and runs the two analyses (hot questions per subject, recommended
   * questions per subject). Results are printed via `show()` by each analysis.
   */
  def dataAnalysis(): Unit = {
    val spark = SparkUtil.takeSpark()
    import spark.implicits._

    // 1. Read the `edu` table from MySQL through Spark SQL's JDBC source and
    //    map each row onto the AnswerWithRecommendations case class.
    // NOTE(review): `com.mysql.jdbc.Driver` is the legacy Connector/J 5.x class
    // name; Connector/J 8.x uses `com.mysql.cj.jdbc.Driver` — confirm which
    // connector jar is on the classpath.
    val answers: Dataset[AnswerWithRecommendations] = spark.read
      .format("jdbc")
      .option("url", "jdbc:mysql://node1:3306/BD1_2025?useUnicode=true&characterEncoding=utf8")
      .option("driver", "com.mysql.jdbc.Driver")
      .option("user", "root")
      .option("password", "Niit@123")
      .option("dbtable", "edu")
      .load()
      .as[AnswerWithRecommendations]

    hotQuestionWithSubject(answers)
    hotSubjectRecommendTop20(answers)
  }

  /**
   * Requirement 1: hot-question analysis per subject.
   *
   * Finds the 50 most frequently answered questions, resolves each hot
   * question's subject by joining back to the source data, then counts how
   * many hot questions each subject contains (descending).
   *
   * @param data the full answer dataset read from the `edu` table
   */
  def hotQuestionWithSubject(data: Dataset[AnswerWithRecommendations]): Unit = {
    // 1. Top 50 hot questions: count occurrences of each question_id and
    //    keep the 50 with the highest counts.
    val top50Hot: Dataset[Row] = data
      .groupBy("question_id")
      .agg(count("question_id").as("hot"))
      .orderBy(desc("hot"))
      .limit(50)

    // 2. Join the top-50 back to the source (deduplicated by question_id so
    //    each hot question contributes exactly one row) to pick up subject_id.
    val hotWithSubject: Dataset[Row] =
      top50Hot.join(data.dropDuplicates("question_id"), "question_id")

    // 3. Count hot questions per subject, highest first.
    val perSubject: Dataset[Row] = hotWithSubject
      .groupBy("subject_id")
      .agg(count("*").as("hotCount"))
      .orderBy(desc("hotCount"))

    // TODO: persist to a database; for now the result is printed to stdout.
    perSubject.show()
  }

  /**
   * Requirement 2: recommended-question analysis per subject.
   *
   * Takes the top-20 hot questions, expands their comma-separated
   * `recommendations` field into individual recommended question ids, maps
   * each recommended question to its subject, and counts recommendations per
   * subject (descending).
   *
   * @param data the full answer dataset read from the `edu` table
   */
  def hotSubjectRecommendTop20(data: Dataset[AnswerWithRecommendations]): Unit = {
    // 1. Top 20 hot questions by answer count.
    val top20Hot: Dataset[Row] = data
      .groupBy("question_id")
      .agg(count("question_id").as("hot"))
      .orderBy(desc("hot"))
      .limit(20)

    // 2. Join back to the source data to fetch each hot question's
    //    recommendations column (one row per matching answer).
    val recommendationRows: DataFrame =
      top20Hot.join(data, "question_id").select("recommendations")

    // 3. `recommendations` is a comma-separated id list, e.g.
    //    "题目ID_1500,题目ID_1878,...". split + explode turns it into one row
    //    per recommended question id; dropDuplicates keeps each id once.
    // NOTE(review): tokens are not trimmed — if the stored list ever contains
    // spaces around the commas, the later join on question_id would miss;
    // confirm the data format before adding a trim.
    val recommendedIds = recommendationRows
      .select(explode(split(col("recommendations"), ",")).as("question_id"))
      .dropDuplicates("question_id")

    // 4. Resolve each recommended question's subject via a join against the
    //    deduplicated source data.
    val recommendedWithSubject: DataFrame =
      recommendedIds.join(data.dropDuplicates("question_id"), "question_id")

    // 5. Count recommended questions per subject, highest first.
    val perSubject: Dataset[Row] = recommendedWithSubject
      .groupBy("subject_id")
      .agg(count("*").as("rcount"))
      .orderBy(desc("rcount"))

    // TODO: persist to a database; for now the result is printed to stdout.
    perSubject.show()
  }

}
