package movie
// Compute the average rating for each movie genre

import org.apache.spark.sql.SaveMode

object task4 {
  /**
   * Computes the average rating per movie genre.
   *
   * Reads `movies.csv` and `ratings.csv` from HDFS, splits each movie's
   * pipe-delimited `genres` column into one row per genre, joins the
   * result with the ratings table, aggregates the mean rating per genre,
   * prints the result, and overwrites the MySQL table `result4` with it.
   */
  def main(args: Array[String]): Unit = {
    import org.apache.spark.sql.SparkSession
    val spark = SparkSession.builder()
      .appName("genres_avg_rating")
      .master("spark://niit-master:7077")
      .config("hive.metastore.uris", "thrift://niit-master:9083")
      //      .config("spark.driver.host","10.10.4.28")
      .enableHiveSupport()
      .getOrCreate()

    // Read the source data from HDFS; the header row supplies column names.
    val csvDF1 = spark.read.format("csv").option("header", true)
      .load("hdfs://niit-master/spark/movies.csv")
    val csvDF2 = spark.read.format("csv").option("header", true)
      .load("hdfs://niit-master/spark/ratings.csv")

    // Register temp views so the data can be queried with Spark SQL.
    csvDF1.createOrReplaceTempView("movies")
    csvDF2.createOrReplaceTempView("ratings")

    // FIX: the original MySQL-style numbers-table trick
    // (SELECT 1 n UNION ALL ... SELECT 4) only split the first FOUR genres
    // of each movie, silently dropping ratings for any further genres.
    // explode(split(...)) handles an arbitrary number of genres, avoids the
    // cross join, and drops the useless ORDER BY inside the subquery.
    // '[|]' is used because split() takes a regex and '|' alone is the
    // regex alternation operator.
    val result =
      """
        |SELECT genres_split, avg(rating) AS avg_rating
        |FROM
        |(
        |  SELECT movieId, explode(split(genres, '[|]')) AS genres_split
        |  FROM movies
        |) t
        |JOIN ratings ON t.movieId = ratings.movieId
        |GROUP BY genres_split
        |""".stripMargin

    val df = spark.sql(result)
    df.show(false)

    // Save the result to MySQL; the target table is replaced on each run.
    df.write
      .format("jdbc")
      .option("url", "jdbc:mysql://niit-master:3306/sem7_sparkpj")
      .option("driver", "com.mysql.jdbc.Driver")
      .option("user", "root")
      .option("password", "root")
      .option("dbtable", "result4")
      .mode(SaveMode.Overwrite)
      .save()

    // Release cluster resources.
    spark.close()
  }

}
