package movie

import org.apache.spark.sql.SaveMode
// Find the 10 most popular (highest average-rated) movies for a specific year (2015 as the example).

object task6 {

  /** Year whose ratings are analysed; change this constant to query another year. */
  private val TargetYear = 2015

  def main(args: Array[String]): Unit = {
    import org.apache.spark.sql.SparkSession

    // Hive-enabled session against the standalone cluster.
    val spark = SparkSession.builder()
      .appName("certain_year_movie")
      .master("spark://niit-master:7077")
      .config("hive.metastore.uris", "thrift://niit-master:9083")
      //      .config("spark.driver.host","10.10.4.28")
      .enableHiveSupport()
      .getOrCreate()

    // try/finally guarantees the session (and its executors / cluster
    // resources) is released even when the job fails mid-way.
    try {
      // Read the source CSVs from HDFS; the header row supplies column names.
      val moviesDF = spark.read
        .format("csv")
        .option("header", true)
        .load("hdfs://niit-master/spark/movies.csv")
      val ratingsDF = spark.read
        .format("csv")
        .option("header", true)
        .load("hdfs://niit-master/spark/ratings.csv")

      // Register temporary views so the aggregation can be expressed in SQL.
      moviesDF.createOrReplaceTempView("movies")
      ratingsDF.createOrReplaceTempView("ratings")

      // Top-10 titles by average rating among ratings submitted in TargetYear.
      // `ratings.timestamp` is qualified explicitly so the column resolution
      // does not depend on the two schemas staying disjoint. The filter is on
      // the rating's unix-epoch timestamp, i.e. "rated in that year", not
      // "released in that year".
      val query =
        s"""
           |SELECT
           |  movies.title, avg(ratings.rating) AS avg_rating
           |FROM
           |  movies
           |JOIN
           |  ratings ON movies.movieId = ratings.movieId
           |WHERE
           |  YEAR(FROM_UNIXTIME(ratings.timestamp)) = $TargetYear
           |GROUP BY movies.title
           |ORDER BY
           |  avg_rating DESC
           |LIMIT 10
           |""".stripMargin

      val df = spark.sql(query)
      df.show(false)

      // Persist the result to MySQL. SaveMode.Overwrite drops and
      // recreates the `result6` table on every run.
      df.write
        .format("jdbc")
        .option("url", "jdbc:mysql://niit-master:3306/sem7_sparkpj")
        .option("driver", "com.mysql.jdbc.Driver")
        .option("user", "root")
        .option("password", "root")
        .option("dbtable", "result6")
        .mode(SaveMode.Overwrite)
        .save()
    } finally {
      // Shut down the Spark session (stops the underlying SparkContext).
      spark.close()
    }
  }

}
