package movie
// Find the top 10 movies with the highest number of ratings and compute their average rating.

import org.apache.spark.sql.SaveMode

object task1 {
  /**
   * Spark job: joins the movies and ratings datasets, finds the 10 movies
   * with the most ratings (ties broken by average rating), and writes the
   * result to MySQL and to HDFS.
   */
  def main(args: Array[String]): Unit = {
    import org.apache.spark.sql.SparkSession

    // Build a SparkSession attached to the standalone cluster, with Hive
    // metastore support enabled.
    val spark = SparkSession.builder()
      .appName("rate_num_top10")
      .master("spark://niit-master:7077")
      .config("hive.metastore.uris", "thrift://niit-master:9083")
//      .config("spark.driver.host","10.10.4.28")
      .enableHiveSupport()
      .getOrCreate()

    // Read the two CSV datasets from HDFS (first row is the header).
    // NOTE(review): without inferSchema all columns are strings; AVG() below
    // relies on Spark SQL's implicit cast of the rating column to double.
    val csvFile1 = Seq("hdfs://niit-master/spark/movies.csv")
    val csvDF1 = spark.read.format("csv").option("header", true).load(csvFile1.mkString(","))
    val csvFile2 = Seq("hdfs://niit-master/spark/ratings.csv")
    // BUG FIX: this previously loaded csvFile1 (movies.csv) a second time,
    // so the "ratings" view contained movie rows and the join was wrong.
    val csvDF2 = spark.read.format("csv").option("header", true).load(csvFile2.mkString(","))

    // Alternative source: read the same tables from MySQL instead of HDFS.
//    val csvDF1 = spark.read
//      .format("jdbc")
//      .option("url", "jdbc:mysql://niit-master:3306/sem7_sparkpj")
//      .option("driver", "com.mysql.jdbc.Driver")
//      .option("user", "root")
//      .option("password", "root")
//      .option("dbtable", "movies")
//      .load()
//    val csvDF2 = spark.read
//      .format("jdbc")
//      .option("url", "jdbc:mysql://niit-master:3306/sem7_sparkpj")
//      .option("driver", "com.mysql.jdbc.Driver")
//      .option("user", "root")
//      .option("password", "root")
//      .option("dbtable", "ratings")
//      .load()

    // Register temp views so the datasets can be queried via Spark SQL.
    csvDF1.createOrReplaceTempView("movies")
    csvDF2.createOrReplaceTempView("ratings")

    // Top 10 movies by rating count (ties broken by average rating),
    // together with each movie's average rating.
    val query =
      """
        |SELECT m.title, AVG(r.rating) AS avg_rating, COUNT(r.movieid) AS rating_count
        |FROM movies m JOIN ratings r ON m.movieid = r.movieid
        |GROUP BY m.title
        |ORDER BY rating_count DESC,avg_rating DESC
        |LIMIT 10
        |""".stripMargin
    val df = spark.sql(query)
    df.show(false)

    // Optionally persist the result to a local path.
//    df.write.parquet("output/movie/result1")

    // Persist the result into MySQL, replacing any table from a previous run.
    // NOTE(review): com.mysql.jdbc.Driver is the legacy class name; newer
    // MySQL Connector/J jars use com.mysql.cj.jdbc.Driver — confirm the jar
    // deployed on the cluster before changing it.
    df.write
      .format("jdbc")
      .option("url", "jdbc:mysql://niit-master:3306/sem7_sparkpj")
      .option("driver", "com.mysql.jdbc.Driver")
      .option("user", "root")
      .option("password", "root")
      .option("dbtable", "result1")
      .mode(SaveMode.Overwrite)
      .save()

    // Also dump a single-part text copy to HDFS.
    // NOTE(review): saveAsTextFile throws if the target path already exists —
    // remove the old output first when re-running the job.
    df.rdd.coalesce(1).saveAsTextFile("hdfs://niit-master/spark/test")

    // Release cluster resources (stop() is what close() delegates to).
    spark.stop()
  }
}
