package read.niit.service

import org.apache.spark.sql.{Dataset, SaveMode, SparkSession}
import org.apache.spark.sql.functions.count
import read.niit.bean.ReaderWithRecommendations
import read.niit.dao.BatchReaderDao
import read.niit.util.SparkUtil

/**
 * Requirement 1 (offline/batch data):
 * Among all college-student readers, find the subjects of the top 50
 * most-read ("hot") books, then count how many hot books each of those
 * subjects contains. Implemented with the Spark DataFrame DSL.
 */

class BatchReadBookCountTop50Service {

  private val spark: SparkSession = SparkUtil.takeSpark()

  // Needed for the $"colName" column interpolator used below.
  import spark.implicits._

  /**
   * Entry point: loads the full reader dataset from the DAO and runs the
   * top-50 hot-book subject analysis.
   */
  def dataAnalysis(): Unit = {
    val readerDao = new BatchReaderDao
    val allInfoDS = readerDao.getReaderData()

    hotread_SubjectCountTop50(allInfoDS)
  }

  /**
   * Computes, for the 50 most-read books, how many of them fall into each
   * reading subject, then prints the result and appends it to the MySQL
   * table `hotCountTable`.
   *
   * @param allInfoDS full reader/recommendation dataset; expected to carry
   *                  at least `book_id` and `read_subject_id` columns
   */
  def hotread_SubjectCountTop50(allInfoDS: Dataset[ReaderWithRecommendations]): Unit = {
    // 1. Top 50 books ranked by total read count.
    //    NOTE: symbol-literal columns ('hotCount) are deprecated since
    //    Scala 2.13; use the $"..." interpolator instead.
    val hotTop50 = allInfoDS.groupBy("book_id")
      .agg(count("*") as "hotCount")
      .orderBy($"hotCount".desc)
      .limit(50)

    // 2. Join the top-50 back to the (book_id-deduplicated) source dataset
    //    to recover each hot book's subject.
    val joinDF = hotTop50.join(allInfoDS.dropDuplicates("book_id"), "book_id")

    // 3. Group by subject and count how many hot books each subject holds.
    val res = joinDF.groupBy("read_subject_id")
      .agg(count("*") as "hotCount")
      .orderBy($"hotCount".desc)
    res.show()

    // SECURITY NOTE(review): DB credentials are hard-coded here; they should
    // be moved to external configuration (e.g. spark-submit conf or a
    // properties file) before this leaves a classroom setting.
    res.write
      .format("jdbc")
      .option("url", "jdbc:mysql://node1:3306/BD2?useUnicode=true&characterEncoding=utf8")
      .option("driver", "com.mysql.jdbc.Driver")
      .option("user", "root")
      .option("password", "Niit@123")
      .option("dbtable", "hotCountTable") // target table (auto-created if absent)
      .mode(SaveMode.Append) // append mode: keeps any existing rows
      .save()
  }

}
