package org.niit.service

import org.apache.spark.sql.{DataFrame, Dataset, SaveMode}
import org.apache.spark.streaming.dstream.DStream
import org.niit.common.TService
import org.niit.util.SparkUtil

/**
 * Date: 2025/6/10
 * Author: Ys
 * Description: Computes the top-10 highest-rated movies (restricted to movies
 * with more than 200 ratings) from raw tab-separated rating lines and writes
 * the result to a MySQL table.
 */
class MovieDataService extends TService[ Dataset[String] ]{

  /**
   * Analyses raw rating lines and persists the top-10 best-rated movies
   * (among movies with more than 200 ratings) to MySQL.
   *
   * Input line format (tab-separated): userId \t movieId \t score \t timestamp
   * e.g. "196\t242\t3\t881250949"  ==>  (movieId = "242", score = 3)
   *
   * @param data raw rating lines, one record per line
   */
  override def dataAnalysis(data: Dataset[String]): Unit = {
    val spark = SparkUtil.takeSpark()
    import spark.implicits._ // implicit conversions needed for .map on Dataset and .toDF

    // 1. Parse each line into (movieId, score).
    // NOTE(review): a malformed line (fewer than 3 fields, or a non-numeric
    // score) throws here, exactly as in the original code — assumes the input
    // file is always well-formed; confirm upstream.
    val movieDF: DataFrame = data.map { line =>
      val fields: Array[String] = line.split("\t")
      (fields(1), fields(2).toInt) // (movieId, score)
    }.toDF("movieId", "score")

    // 2. Aggregate with the DataFrame DSL: average score and rating count per
    //    movie, keep movies rated more than 200 times, take the 10 best-rated.
    //
    //    Equivalent Spark SQL, kept for reference (the original code built this
    //    query too, but its result was discarded by an immediate reassignment):
    //      SELECT movieId, avg(score) AS avgScore, count(*) AS counts
    //      FROM movies
    //      GROUP BY movieId
    //      HAVING counts > 200
    //      ORDER BY avgScore DESC
    //      LIMIT 10
    import org.apache.spark.sql.functions._ // DSL aggregate/sort functions
    val resFrame: DataFrame = movieDF
      .groupBy("movieId")
      .agg(
        avg("score").as("avgScore"),
        count("movieId").as("counts")
      )
      .filter("counts > 200")
      .orderBy(desc("avgScore"))
      .limit(10)

    // 3. Write the result to MySQL over JDBC.
    //    SaveMode.Append creates the table automatically when it does not exist.
    // SECURITY NOTE(review): credentials are hard-coded — move the url/user/
    // password into configuration or a secrets store.
    resFrame.write
      .format("jdbc")
      .option("url", "jdbc:mysql://node1:3306/BD2_2025")
      // NOTE(review): legacy driver class name; MySQL Connector/J 8+ renamed it
      // to com.mysql.cj.jdbc.Driver — confirm which driver jar is on the cluster.
      .option("driver", "com.mysql.jdbc.Driver")
      .option("user", "root")
      .option("password", "Niit@123")
      .option("dbtable", "movies")
      .mode(SaveMode.Append)
      .save()
  }

}
