package org.niit.service

import org.apache.spark.sql.Dataset
import org.niit.util.SparkUtil

class MovieDataService {

  /**
   * Analyzes a MovieLens-style rating file and prints the top-10 movies by
   * average score, restricted to movies with more than 200 ratings.
   *
   * The same analysis is run twice for demonstration purposes: once with a
   * SQL query over a temp view, and once with the DataFrame DSL. Both results
   * are printed via `show()`; nothing is returned.
   *
   * @param dataPath path of the tab-separated rating file; each line is
   *                 `userId \t movieId \t rating \t timestamp`
   *                 (e.g. "196\t242\t3\t881250949"). Defaults to the
   *                 original hard-coded location for backward compatibility.
   */
  def dataAnalysis(dataPath: String = "input/rating_100k.data"): Unit = {
    // 1. Load the raw rating lines through the shared SparkSession.
    val spark = SparkUtil.takeSpark()
    val ds: Dataset[String] = spark.read.textFile(dataPath)
    import spark.implicits._ // encoders, toDF, and $"col" syntax

    // 2. Parse each line, keeping only (movieId, score).
    //    Field 1 is the movie id, field 2 is the integer rating.
    val movieDF = ds.map(line => {
      val fields: Array[String] = line.split("\t")
      val movieId = fields(1)
      val score = fields(2).toInt
      (movieId, score)
    }).toDF("movieId", "score")

    // 3. Expose the parsed data as a temp view so it can be queried with SQL.
    movieDF.createOrReplaceTempView("movies")

    // 4.1 SQL approach: average score per movie, keep movies rated more than
    //     200 times, order by average descending, take the top 10.
    val sql =
      """
        |select movieId,avg(score) as avgScore,count(*) as counts
        |from movies
        |group by movieId
        |having counts > 200
        |order by avgScore desc
        |limit 10
        |""".stripMargin

    spark.sql(sql).show()

    // 4.2 DataFrame DSL approach: the same query expressed with column
    //     expressions. Uses $"col" interpolation instead of the deprecated
    //     Symbol ('col) syntax.
    import org.apache.spark.sql.functions._ // avg, count, etc.
    movieDF.groupBy($"movieId")
      .agg(
        avg($"score").as("avgScore"),
        count($"movieId").as("counts")
      )
      .filter($"counts" > 200)
      .orderBy($"avgScore".desc)
      .limit(10)
      .show()
  }

}
