package movie

import org.apache.spark.sql.SaveMode
//// Compute the 10 years with the highest average movie rating

object task5 {

  /**
   * Spark job: find the 10 years with the highest average movie rating.
   *
   * Reads the MovieLens-style `ratings.csv` from HDFS, derives the year from
   * the Unix `timestamp` column, averages `rating` per year, and writes the
   * top-10 result to the MySQL table `result5` (overwritten on each run).
   */
  def main(args: Array[String]): Unit = {
    import org.apache.spark.sql.SparkSession

    // NOTE(review): cluster / metastore endpoints and credentials are
    // hard-coded; acceptable for a course project, but consider moving them
    // to configuration.
    val spark = SparkSession.builder()
      .appName("year_avg_rating_top10")
      .master("spark://niit-master:7077")
      .config("hive.metastore.uris", "thrift://niit-master:9083")
      //      .config("spark.driver.host","10.10.4.28")
      .enableHiveSupport()
      .getOrCreate()

    // Read the ratings data from HDFS.
    // inferSchema makes `rating` numeric and `timestamp` an integer type, so
    // the SQL below no longer depends on implicit string-to-number casts.
    val csvFile1 = Seq("hdfs://niit-master/spark/ratings.csv")
    val csvDF1 = spark.read
      .format("csv")
      .option("header", true)
      .option("inferSchema", true)
      .load(csvFile1.mkString(","))

    // Alternative source: read the ratings table from MySQL instead of HDFS.
    //    val csvDF1 = spark.read
    //      .format("jdbc")
    //      .option("url", "jdbc:mysql://niit-master:3306/sem7_sparkpj")
    //      .option("driver", "com.mysql.jdbc.Driver")
    //      .option("user", "root")
    //      .option("password", "root")
    //      .option("dbtable", "ratings")
    //      .load()

    // Register a temporary view so the data can be queried with Spark SQL.
    csvDF1.createOrReplaceTempView("ratings")

    // Average rating per year, highest first, limited to the top 10.
    // FROM_UNIXTIME interprets `timestamp` as seconds since the epoch
    // (MovieLens timestamps are in seconds).
    val result =
      """
        |SELECT
        |YEAR(FROM_UNIXTIME(timestamp)) as movie_year,
        |avg(rating) as avg_rating
        |from ratings
        |GROUP BY movie_year
        |ORDER BY avg_rating desc
        |LIMIT 10
        |""".stripMargin

    val df = spark.sql(result)
    df.show(false)

    // Persist the result to MySQL; SaveMode.Overwrite replaces the table.
    // NOTE(review): `com.mysql.jdbc.Driver` is the legacy Connector/J 5.x
    // class; with Connector/J 8+ the class is `com.mysql.cj.jdbc.Driver`.
    df.write
      .format("jdbc")
      .option("url", "jdbc:mysql://niit-master:3306/sem7_sparkpj")
      .option("driver", "com.mysql.jdbc.Driver")
      .option("user", "root")
      .option("password", "root")
      .option("dbtable", "result5")
      .mode(SaveMode.Overwrite)
      .save()

    // Release the Spark session (close() delegates to stop()).
    spark.close()
  }

}
