package org.niit.service

import org.apache.spark.sql.{DataFrame, Dataset, Row, SaveMode, SparkSession}
import org.niit.common.TService
import org.niit.util.SparkUtil

/**
 * Date:2025/6/5
 * Author：Ys
 * Description:
 */
/**
 * Analyzes MovieLens-style rating lines ("userId\tmovieId\tscore\ttimestamp"):
 * computes the Top-10 movies by average score among movies rated more than
 * 200 times, then appends the result to a MySQL table via JDBC.
 *
 * Date: 2025/6/5
 * Author: Ys
 */
class MovieDataService extends TService[Dataset[String]] {

  import spark.implicits._
  import org.apache.spark.sql.functions._

  /**
   * Runs the full analysis pipeline on the raw rating lines.
   *
   * @param data one tab-separated record per rating, e.g. "196\t242\t3\t881250949"
   */
  override def dataAnalysis(data: Dataset[String]): Unit = {

    data.show() // debug: preview the raw input

    // 1. Parse each line into (movieId, score).
    //    Malformed lines (fewer than 3 fields, or a non-numeric score) are
    //    skipped instead of failing the whole job on a single bad record.
    val movieDF: DataFrame = data.flatMap { line =>
      val fields = line.split("\t")
      if (fields.length >= 3 && fields(2).matches("-?\\d+"))
        Some((fields(1), fields(2).toInt))
      else
        None
    }.toDF("movieId", "score")

    // 2. DSL equivalent of:
    //      SELECT movieId, avg(score) AS avgScore, count(*) AS counts
    //      FROM movies GROUP BY movieId
    //      HAVING counts > 200 ORDER BY avgScore DESC LIMIT 10
    val top10: Dataset[Row] = movieDF
      .groupBy("movieId")
      .agg(avg("score") as "avgScore", count("movieId") as "counts")
      .filter("counts > 200")
      .orderBy(desc("avgScore"))
      .limit(10)

    // 3. Append the result to the MySQL table `movie`.
    // NOTE(review): "com.mysql.jdbc.Driver" is the legacy Connector/J 5.x class;
    // with Connector/J 8.x the class is "com.mysql.cj.jdbc.Driver" — confirm
    // which connector is on the classpath before changing it.
    // NOTE(review): credentials are hard-coded; move them to configuration.
    top10.write.format("jdbc")
      .option("url", "jdbc:mysql://node1:3306/BD1_2025")
      .option("driver", "com.mysql.jdbc.Driver")
      .option("user", "root")
      .option("password", "Niit@123")
      .option("dbtable", "movie")
      .mode(SaveMode.Append) // append so repeated runs do not drop prior results
      .save()
  }

}
